wangkun, 2 years ago
parent revision 1a63636db0

+ 12 - 0
README.MD

@@ -171,4 +171,16 @@ sh ./main/main.sh ./douyin/douyin_main/run_douyin_recommend.py --log_type="recom
 Alibaba Cloud server 102: /usr/bin/sh /data5/piaoquan_crawler/main/process.sh "prod"
 Hong Kong server: /usr/bin/sh /root/piaoquan_crawler/main/process.sh "hk"
 Local debugging: sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process.sh "dev"
+```
+
+
+#### 本山祝福小程序 (Benshanzhufu mini program)
+```commandline
+Alibaba Cloud server 102
+/usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend.py --log_type="recommend" --crawler="benshanzhufu" --env="prod"  ./benshanzhufu/logs/nohup-recommend.log
+Local debugging
+sh ./main/scheduling_main.sh ./benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend.py --log_type="recommend" --crawler="benshanzhufu" --env="dev"  ./benshanzhufu/logs/nohup-recommend.log
+Check / kill the crawler process
+ps aux | grep run_benshanzhufu
+ps aux | grep run_benshanzhufu | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

BIN
benshanzhufu/.DS_Store


+ 3 - 0
benshanzhufu/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 3 - 0
benshanzhufu/benshanzhufu_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 28 - 0
benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend.py

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend import BenshanzhufuRecommend
+
+def main(log_type, crawler, env):
+    if env == "dev":
+        oss_endpoint = "out"
+    else:
+        oss_endpoint = "inner"
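+    # "out" = external OSS endpoint for local debugging; "inner" = (presumably) the intranet endpoint used on the servers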
+    Common.logger(log_type, crawler).info('开始抓取 本山祝福小程序\n')
+    BenshanzhufuRecommend.get_videoList(log_type, crawler, oss_endpoint, env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument and declare its type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # parse the arguments (values can also be supplied on the command line)
+    main(log_type=args.log_type, crawler=args.crawler, env=args.env)

+ 3 - 0
benshanzhufu/benshanzhufu_recommend/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 264 - 0
benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend.py

@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import random
+import shutil
+import sys
+import time
+from hashlib import md5
+from urllib import parse
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.feishu import Feishu
+from common.publish import Publish
+proxies = {"http": None, "https": None}
+
+
+class BenshanzhufuRecommend:
+    # 翻页参数
+    visitor_key = ""
+    page = 1
+    platform = "本山祝福"
+
+    # 过滤词库
+    @classmethod
+    def benshanzhufu_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="benshanzhufu" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
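+            # the config column holds a Python-dict-style string (as written by insert.py), so eval() turns it back into a dict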
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
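+        # return how many crawler_video rows already contain this out_video_id (non-zero means the video was crawled before)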
+        sql = f""" select * from crawler_video where platform="本山祝福" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 推荐列表获取视频
+    @classmethod
+    def get_videoList(cls, log_type, crawler, oss_endpoint, env):
+        while True:
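+            # keep requesting the recommend feed (random pages) until the API errors out or returns an empty list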
+            now = int(time.time() * 1000)
+            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
+            header = {
+                "content-time": str(now),
+                # "visitorKey": "165086930003741",
+                "chatKey": "wx0fb8149da961d3b0",
+                "cache-time": str(now),
+                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
+                              "MicroMessenger/8.0.20(0x1800142d) NetType/WIFI Language/zh_CN",
+                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
+            }
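+            # query payload: a random page number (1-76) plus ini_id, which carries the visitor_key returned by the previous response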
+            parameter = {
+                "page": random.randint(1, 76),
+                "ini_id": cls.visitor_key
+            }
+            params = parse.quote(json.dumps(parameter))
+            url = url + str(params)
+            # try:
+            urllib3.disable_warnings()
+            r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
+            if r.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
+                return
+            elif r.json()['message'] != "list success":
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                return
+            elif "data" not in r.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                return
+            elif len(r.json()['data']["list"]) == 0:
+                Common.logger(log_type, crawler).info(f"没有更多数据了~ {r.json()}\n")
+                return
+            else:
+                # 翻页
+                cls.visitor_key = r.json()["data"]["visitor_key"]
+                cls.page += 1
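+                # keep visitor_key for the next request; note cls.page is incremented but the request above always picks a random page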
+                feeds = r.json()["data"]["list"]
+                for i in range(len(feeds)):
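+                    # clean the title: strip newlines/blanks and characters that are not valid in file names or titles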
+                    video_title = feeds[i].get("title", "").strip().replace("\n", "")\
+                            .replace("/", "").replace("本山祝福", "").replace(" ", "")\
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")\
+                            .replace("#", "").replace(".", "。").replace("\\", "")\
+                            .replace(":", "").replace("*", "").replace("?", "")\
+                            .replace("?", "").replace('"', "").replace("<", "")\
+                            .replace(">", "").replace("|", "").replace("'", "").replace('"', "")
+                    video_id = str(feeds[i].get("nid", ""))
+                    play_cnt = 0
+                    comment_cnt = feeds[i].get("commentCount", 0)
+                    share_cnt = 0
+                    like_cnt = 0
+                    publish_time_stamp = feeds[i].get("update_time", 0)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    user_name = "本山祝福"
+                    user_id = "benshanzhufu"
+                    cover_url = feeds[i].get("video_cover", "")
+                    video_url = feeds[i].get("video_url", "")
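+                    # treat non-.mp4 URLs as missing so the record is skipped as invalid by the check below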
+                    if ".mp4" not in video_url:
+                        video_url = ""
+
+                    video_dict = {
+                        'video_title': video_title,
+                        'video_id': video_id,
+                        'play_cnt': play_cnt,
+                        'comment_cnt': comment_cnt,
+                        'like_cnt': like_cnt,
+                        'share_cnt': share_cnt,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user_name,
+                        'user_id': user_id,
+                        'avatar_url': cover_url,
+                        'cover_url': cover_url,
+                        'video_url': video_url,
+                        'session': f"benshanzhufu-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    # 过滤无效视频
+                    if video_id == "" or cover_url == "" or video_url == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                    elif any(str(word) in video_title for word in cls.benshanzhufu_config(log_type, crawler, "filter", env) if str(word)):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"get_videoList异常:{e}\n")
+
+    # 下载 / 上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
+        # try:
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # 保存视频信息至txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐榜爬虫策略",
+                                                  our_uid="recommend",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == 'dev':
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_id'],
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
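+        # rule_dict stays empty for the recommend strategy; it is JSON-dumped into the crawler_rule column below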
+        rule_dict = {}
+        # 视频信息保存数据库
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐榜爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
+        #     # 删除视频文件夹
+        #     shutil.rmtree(f"./{crawler}/videos/")
+        #     return
+
+
+if __name__ == "__main__":
+    BenshanzhufuRecommend.get_videoList("recommend", "benshanzhufu", "out", "dev")
+
+    pass

+ 75 - 0
benshanzhufu/benshanzhufu_recommend/insert.py

@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import os
+import sys
+import time
+from datetime import date, timedelta
+
+sys.path.append(os.getcwd())
+from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
+
+
+class Insert:
+    @classmethod
+    def get_config(cls, log_type, crawler, text, env):
+        select_sql = f"""select * from crawler_config where source="benshanzhufu" """
+        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
+        title_list = []
+        filter_list = []
+        for content in contents:
+            config = content['config']
+            config_dict = eval(config)
+            for k, v in config_dict.items():
+                if k == "title":
+                    title_list_config = v.split(",")
+                    for title in title_list_config:
+                        title_list.append(title)
+                if k == "filter":
+                    filter_list_config = v.split(",")
+                    for filter_word in filter_list_config:
+                        filter_list.append(filter_word)
+        if text == "title":
+            return title_list
+        elif text == "filter":
+            return filter_list
+
+    @classmethod
+    def before_day(cls):
+        publish_time_str_rule = (date.today() + timedelta(days=-30)).strftime("%Y-%m-%d %H:%M:%S")
+        publish_time_stamp_rule = int(time.mktime(time.strptime(publish_time_str_rule, "%Y-%m-%d %H:%M:%S")))
+        print(publish_time_str_rule)
+        print(publish_time_stamp_rule)
+
+    @classmethod
+    def insert_config(cls, log_type, crawler, env):
+        filter_sheet = Feishu.get_values_batch(log_type, crawler, "DjXfqG")
+        # title_sheet = Feishu.get_values_batch(log_type, crawler, "bHSW1p")
+        filter_list = []
+        # title_list = []
+        for x in filter_sheet:
+            for y in x:
+                if y is None:
+                    pass
+                else:
+                    filter_list.append(y)
+        # for x in title_sheet:
+        #     for y in x:
+        #         if y is None:
+        #             pass
+        #         else:
+        #             title_list.append(y)
+        # str_title = ','.join(title_list)
+        str_filter = ','.join(filter_list)
+        config_dict = {
+            # "title": str_title,
+            "filter": str_filter
+        }
+        str_config_dict = str(config_dict)
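+        # the config is stored as a Python dict repr; get_config() / benshanzhufu_config() later eval() it back into a dict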
+        # print(f"config_dict:{config_dict}")
+        # print(f"str_config_dict:{str_config_dict}")
+        insert_sql = f""" insert into crawler_config(title, source, config) values("本山祝福小程序", "benshanzhufu", "{str_config_dict}") """
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+
+
+if __name__ == "__main__":
+    # Insert.insert_config("insert", "benshanzhufu", "dev")
+    print(Insert.get_config("insert", "benshanzhufu", "filter", "dev"))

BIN
benshanzhufu/logs/.DS_Store


+ 3 - 0
benshanzhufu/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13

+ 7 - 7
common/feishu.py

@@ -88,7 +88,7 @@ class Feishu:
             return "shtcnlZWYazInhf7Z60jkbLRJyd"
             return "shtcnlZWYazInhf7Z60jkbLRJyd"
         elif crawler == "music_album":
         elif crawler == "music_album":
             return "shtcnT6zvmfsYe1g0iv4pt7855g"
             return "shtcnT6zvmfsYe1g0iv4pt7855g"
-        elif crawler == "bszf":
+        elif crawler == "benshanzhufu":
             return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
             return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
         elif crawler == "gzh":
         elif crawler == "gzh":
             return "shtcnexNXnpDLHhARw0QdiwbYuA"
             return "shtcnexNXnpDLHhARw0QdiwbYuA"
@@ -100,11 +100,11 @@ class Feishu:
             return 'shtcnvOpx2P8vBXiV91Ot1MKIw8'
         elif crawler == 'zhihu':
             return 'shtcnkGPBmGsjaqapgzouuj8MXe'
-        elif crawler == 'jxxf':
+        elif crawler == 'jixiangxingfu':
             return 'shtcnSx4nafMbLTq7xl7RHBwHBf'
-        elif crawler == 'zmyx':
+        elif crawler == 'zhongmiaoyinxin':
             return 'shtcnbZIxstPeM0xshW07b26sve'
-        elif crawler == 'ssnnyfq':
+        elif crawler == 'suisuiniannianyingfuqi':
             return 'shtcnyJmJSJynHDLLbLTkySfvZe'
         elif crawler == 'zhufumao':
             return 'shtcnXfIJthvkjhI5zlEJq84i6g'
@@ -112,11 +112,11 @@ class Feishu:
             return 'shtcn73NW0CyoOeF21HWO15KBsb'
         elif crawler == 'haokan':
             return 'shtcnaYz8Nhv8q6DbWtlL6rMEBd'
-        elif crawler == 'kdjsfq':
+        elif crawler == 'kandaojiushifuqi':
             return 'shtcnEokBkIjOUPAk8vbbPKnXgb'
-        elif crawler == 'ssyy':
+        elif crawler == 'shengshengyingyin':
             return 'shtcnz1ymxHL1u8WHblfqfys7qe'
-        elif crawler == 'ggdc':
+        elif crawler == 'ganggangdouchuan':
             return 'shtcnTuJgeZU2bc7VaesAqk3QJx'
         elif crawler == 'youtube':
             return 'shtcnrLyr1zbYbhhZyqpN7Xrd5f'

+ 18 - 10
common/publish.py

@@ -196,6 +196,10 @@ class Publish:
             uids_prod_gongzhonghao_follow = [50322238]
             return random.choice(uids_prod_gongzhonghao_follow)

+        elif crawler == 'benshanzhufu' and env == 'prod' and strategy == '推荐榜爬虫策略':
+            uids_prod_gongzhonghao_follow = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]
+            return random.choice(uids_prod_gongzhonghao_follow)
+
         else:
             return our_uid

@@ -216,23 +220,29 @@ class Publish:
             return 'WEIXINZHISHU'
         elif crawler == "douyin":
             return "DOUYIN"
+        elif crawler == "benshanzhufu":
+            return "BENSHANZHUFU"
+        elif crawler == 'suisuiniannianyingfuqi':
+            return 'SUISUINIANNIANYINGFUQI'
+        elif crawler == 'jixiangxingfu':
+            return 'JIXIANGXINGFU'
+        elif crawler == 'ganggangdouchuan':
+            return 'GANGGANGDOUCHUAN'
+        elif crawler == 'zhongmiaoyinxin':
+            return 'ZHONGMIAOYINXIN'
+        elif crawler == 'zhiqingzongqun':
+            return 'ZHIQINGZONGQUN'
+        elif crawler == 'zhiqingtiantiankan':
+            return 'ZHIQINGZONGQUN'

         elif crawler == 'kanyikan':
             return 'KANYIKAN'
         elif crawler == "weishi":
             return "WEISHI"
-        elif crawler == "benshanzhufu":
-            return "BENSHANZHUFU"
         elif crawler == 'shipinhao':
             return 'SHIPINHAO_XCX'
         elif crawler == 'zhihu':
             return 'ZHIHU'
-        elif crawler == 'jixiangxingfu':
-            return 'JIXIANGXINGFU'
-        elif crawler == 'zhongmiaoyinxin':
-            return 'ZHONGMIAOYINXIN'
-        elif crawler == 'suisuiniannianyingfuqi':
-            return 'SUISUINIANNIANYINGFUQI'
         elif crawler == 'zhufumao':
             return 'ZHUFUMAO'
         elif crawler == 'zongjiao':
@@ -243,8 +253,6 @@ class Publish:
             return 'KANDAOJIUSHIFUQI'
         elif crawler == 'shengshengyingyin':
             return 'SHENGSHENGYINGYIN'
-        elif crawler == 'ganggangdouchuan':
-            return 'GANGGANGDOUCHUAN'
         else:
             return "CRAWLER"


+ 15 - 15
main/scheduling_main.sh

@@ -42,21 +42,21 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
   echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
 fi
 fi
 
 
-#if [ ${env} = "--env=hk" ];then
-#  echo "无需重启Appium及adb服务"
-#elif [ ${env} = "--env=prod" ];then
-#  echo "无需重启Appium及adb服务"
-#else
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启Appium..."
-#  ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
-#  nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >>./nohup.log 2>&1 &
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!"
-#
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启adb..."
-#  adb kill-server
-#  adb start-server
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启adb完毕!"
-#fi
+if [ ${env} = "--env=hk" ];then
+  echo "无需重启Appium及adb服务"
+elif [ ${env} = "--env=prod" ];then
+  echo "无需重启Appium及adb服务"
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启Appium..."
+  ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
+  nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >>./nohup.log 2>&1 &
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!"
+
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启adb..."
+  adb kill-server
+  adb start-server
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启adb完毕!"
+fi

 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启服务..."
 cd ${piaoquan_crawler_dir}