wangkun 2 years ago
parent
commit
a30ae3b38e

BIN
.DS_Store


+ 26 - 8
README.MD

@@ -3,7 +3,7 @@
 ### 启动
 1. cd ./piaoquan_crawler
 2. sh ./main/scheduling_main.sh ${crawler_dir} ${log_type} ${crawler} ${env} ${machine} >>${nohup_dir} 2>&1 &
-```
+```commandline
 参数说明
 ${crawler_dir}:     爬虫执行路径,如: scheduling/scheduling_main/run_write_task.py
 ${log_type}:        日志命名格式,如: scheduling-task,则在 scheduling/logs/目录下,生成 2023-02-08-scheduling-task.log
@@ -14,7 +14,7 @@ ${nohup_dir}:       nohup日志存储路径,如: shceduling/nohup-task.log
 ```
 
 #### 运行命令
-```
+```commandline
 阿里云 102 服务器
 sh ./main/scheduling_main.sh scheduling/scheduling_main/run_write_task.py --log_type="scheduling-write" --crawler="scheduling" --env="prod" --machine="aliyun" nohup-write.log 
 sh ./main/scheduling_main.sh scheduling/scheduling_main/run_scheduling_task.py --log_type="scheduling-task" --crawler="scheduling" --env="prod" --machine="aliyun" nohup-task.log 
@@ -29,7 +29,6 @@ sh ./main/scheduling_main.sh scheduling/scheduling_main/run_scheduling_task.py -
 
 杀进程
 ps aux | grep scheduling
-ps aux | grep run_xigua
 ps aux | grep scheduling | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
@@ -38,7 +37,7 @@ ps aux | grep scheduling | grep -v grep | awk '{print $2}' | xargs kill -9
 ### 启动
 1. cd ./piaoquan_crawler
 2. sh ./main/main.sh ${crawler_dir} ${log_type} ${crawler} ${strategy} ${oss_endpoint} ${env} ${machine} ${nohup_dir}
-```
+```commandline
 参数说明
 ${crawler_dir}:     爬虫执行路径,如: ./youtube/youtube_main/run_youtube_follow.py
 ${log_type}:        日志命名格式,如: follow,则在 youtube/logs/目录下,生成 2023-02-08-follow.log
@@ -51,7 +50,7 @@ ${nohup_dir}:       nohup日志存储路径,如: ./youtube/nohup.log
 ```
 
 #### YouTube
-```
+```commandline
 sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="follow" --crawler="youtube" --strategy="定向爬虫策略" --oss_endpoint="hk" --env="prod" --machine="aliyun_hk" youtube/nohup.log
 # sh ./main/main.sh ./youtube/youtube_main/run_youtube_follow.py --log_type="follow" --crawler="youtube" --strategy="定向爬虫策略" --env="prod" --machine="aliyun_hk" youtube/nohup.log
 youtube杀进程命令: 
@@ -60,17 +59,19 @@ ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
 #### 微信指数
-```
+```commandline
 微信指数杀进程
 nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py >>./weixinzhishu/nohup_inner_sort.log 2>&1 &
 nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py >>./weixinzhishu/nohup_inner_long.log 2>&1 &
 nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_out.py >>./weixinzhishu/nohup_out.log 2>&1 &
 ps aux | grep run_weixinzhishu
 ps aux | grep run_weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
+获取 wechat_key 设备: Mac Air 
+ps aux | grep weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9 && cd /Users/piaoquan/Desktop/piaoquan_crawler && nohup python3 -u weixinzhishu/weixinzhishu_key/search_key_mac.py >> weixinzhishu/nohup.log 2>&1 &
 ```
 
 #### 西瓜视频
-```
+```commandline
 阿里云 102 服务器
 sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
 # sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --env="prod" --machine="aliyun" xigua/nohup.log
@@ -86,7 +87,7 @@ ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
 #### 快手
-```
+```commandline
 阿里云 102 服务器
 sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/nohup.log
 # sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="follow" --crawler="kuaishou" --strategy="定向爬虫策略" --env="prod" --machine="aliyun" kuaishou/nohup.log
@@ -99,4 +100,21 @@ sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="fo
 杀进程命令:
 ps aux | grep run_kuaishou
 ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+#### 小年糕
+```commandline
+阿里云 102 服务器
+定向爬虫策略: sh ./main/main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xiaoniangao/nohup.log
+小时榜爬虫策略: 
+播放量榜爬虫策略: 
+
+线下调试
+定向爬虫策略: sh ./main/main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local" xiaoniangao/nohup.log
+小时榜爬虫策略: 
+播放量榜爬虫策略:
+
+杀进程命令
+ps aux | grep run_xiaoniangao
+ps aux | grep run_xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
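
The flags in the commands above are consumed by each run_* entry script through argparse; a minimal sketch of that pattern follows (run_xiaoniangao_follow.py, added later in this commit, does the same):

```python
# Minimal sketch of how a run_* entry script consumes the flags listed above;
# run_xiaoniangao_follow.py (added in this commit) follows the same pattern.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--log_type", type=str)
parser.add_argument("--crawler")
parser.add_argument("--strategy")
parser.add_argument("--oss_endpoint")
parser.add_argument("--env")
parser.add_argument("--machine")
args = parser.parse_args()
print(args.log_type, args.crawler, args.strategy, args.oss_endpoint, args.env, args.machine)
```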

+ 26 - 16
common/feishu.py

@@ -11,6 +11,7 @@ import requests
 import urllib3
 sys.path.append(os.getcwd())
 from common.common import Common
+# from common import Common
 proxies = {"http": None, "https": None}
 
 
@@ -405,26 +406,19 @@ class Feishu:
                 "Authorization": "Bearer " + cls.get_token(log_type, crawler),
                 "Content-Type": "application/json; charset=utf-8"
             }
-            # 手机号
-            wangkun = "13426262515"
-            gaonannan = "18501180073"
-            xinxin = "15546206651"
-            huxinxue = "18832292015"
-            wuchaoyue = "15712941385"
-            lijinchao = '18524120540'
 
             if username == "wangkun":
-                username = wangkun
+                username = "13426262515"
             elif username == "gaonannan":
-                username = gaonannan
+                username = "18501180073"
             elif username == "xinxin":
-                username = xinxin
+                username = "15546206651"
             elif username == "huxinxue":
-                username = huxinxue
+                username = "18832292015"
             elif username == "wuchaoyue":
-                username = wuchaoyue
-            elif username == "lijinchao":
-                username = lijinchao
+                username = "15712941385"
+            elif username == "muxinyi":
+                username = '13699208058'
 
             data = {"mobiles": [username]}
             urllib3.disable_warnings()
@@ -448,6 +442,22 @@ class Feishu:
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
                     cls.get_userid(log_type, crawler, "gaonannan")) + "></at>\n"
 
+            elif crawler == "weixinzhishu_out":
+                content = "微信指数_站外指数"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=YVuVgQ"
+                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
+                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
+            elif crawler == "weixinzhishu_inner_sort":
+                content = "微信指数_站内短期指数"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=DrZHpa"
+                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
+                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
+            elif crawler == "weixinzhishu_inner_long":
+                content = "微信指数_站内长期指数"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=JpgyAv"
+                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
+                    cls.get_userid(log_type, crawler, "muxinyi")) + "></at>\n"
+
             elif crawler == "xiaoniangao_hour":
                 content = "小年糕_小时级_已下载表"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2"
@@ -646,7 +656,7 @@ class Feishu:
                     }],
                     "header": {
                         "title": {
-                            "content": "📣您有新的报警,请注意查收",
+                            "content": "📣您有新的信息,请注意查收",
                             "tag": "plain_text"
                         }
                     }
@@ -660,4 +670,4 @@ class Feishu:
 
 
 if __name__ == "__main__":
-    Feishu.bot('follow', 'xigua', '测试一下,请忽略 ~')
+    Feishu.bot('follow', 'weixinzhishu_out', 'test:微信指数_站外指数已抓取完毕')
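
The username branch above grows with every new contact (this commit swaps lijinchao for muxinyi). A hedged sketch of the same mapping as a dict lookup, using only the numbers shown in the hunk, with an unknown name passed through unchanged exactly as the elif chain does:

```python
# Sketch only, not the repo's code: the phone-number mapping from Feishu.get_userid
# expressed as a table. Unknown usernames fall through unchanged, as in the elif chain.
MOBILE_BY_USERNAME = {
    "wangkun": "13426262515",
    "gaonannan": "18501180073",
    "xinxin": "15546206651",
    "huxinxue": "18832292015",
    "wuchaoyue": "15712941385",
    "muxinyi": "13699208058",
}

def resolve_mobile(username: str) -> str:
    # Fall back to the raw value so an unknown name still reaches the API unchanged.
    return MOBILE_BY_USERNAME.get(username, username)
```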

+ 15 - 5
common/publish.py

@@ -13,8 +13,10 @@ import time
 import oss2
 import requests
 import urllib3
+
 sys.path.append(os.getcwd())
 from common.common import Common
+
 proxies = {"http": None, "https": None}
 
 
@@ -44,7 +46,8 @@ class Publish:
         if result['code'] != 0:
             Common.logger(log_type, crawler).error('pushlish failure msg = {}'.format(result['msg']))
         else:
-            Common.logger(log_type, crawler).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger(log_type, crawler).info(
+                'publish success video_id = : {}'.format(request_data['crawlerSrcId']))
         return video_id
 
     @classmethod
@@ -72,7 +75,8 @@ class Publish:
         if result['code'] != 0:
             Common.logger(log_type, crawler).error('pushlish failure msg = {}'.format(result['msg']))
         else:
-            Common.logger(log_type, crawler).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger(log_type, crawler).info(
+                'publish success video_id = : {}'.format(request_data['crawlerSrcId']))
         return video_id
 
     @classmethod
@@ -173,10 +177,16 @@ class Publish:
         if env == 'dev':
             uids_dev = [6267140, 6267141]
             return random.choice(uids_dev)
+        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '定向爬虫策略':
+            uids_prod_xiaoniangao_follow = [20631196, 20631197, 20631199, 20631200, 20631201, 20631185, 20631186,
+                                            20631187, 20631188, 20631189, 20631190, 20631191, 20631192, 20631193,
+                                            50322036, 50322037, 50322038, 50322039, 50322040, 50322041, 50322173,
+                                            50322175]
+            return random.choice(uids_prod_xiaoniangao_follow)
         elif crawler == 'kanyikan':
             uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
-                                          20631213, 20631214, 20631215, 20631216, 20631217,
-                                          20631223, 20631224, 20631225, 20631226, 20631227]
+                                         20631213, 20631214, 20631215, 20631216, 20631217,
+                                         20631223, 20631224, 20631225, 20631226, 20631227]
             return random.choice(uids_prod_kanyikan_moment)
         elif crawler == 'ggdc' and env == 'prod' and strategy == 'kanyikan_recommend':
             uids_ggdc_prod_recommend = [26117661, 26117662, 26117663]
@@ -352,4 +362,4 @@ class Publish:
             except Exception as e:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{fv}/")
-                Common.logger(log_type, crawler).exception('upload_and_publish error', e)
+                Common.logger(log_type, crawler).exception('upload_and_publish error', e)
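
This commit adds another uid pool (the xiaoniangao follow pool) to the if/elif chain in publish.py. A hedged sketch of the same selection as a lookup table, shown here with only the two short pools visible in the hunks above and keeping the dev shortcut checked first:

```python
import random

# Sketch only: uid-pool selection keyed by (crawler, env, strategy). Pools listed are the
# short ones visible in the diff above; the xiaoniangao and kanyikan pools would be added
# as further entries in the same way.
UIDS_DEV = [6267140, 6267141]
UID_POOLS = {
    ("ggdc", "prod", "kanyikan_recommend"): [26117661, 26117662, 26117663],
}

def pick_uid(crawler: str, env: str, strategy: str) -> int:
    # The dev pool is used regardless of crawler, as in the original method.
    if env == "dev":
        return random.choice(UIDS_DEV)
    return random.choice(UID_POOLS[(crawler, env, strategy)])
```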

BIN
kanyikan/.DS_Store


+ 0 - 59
kanyikan/kanyikan_main/run_kanyikan_moment.py

@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/1/31
-"""
-看一看+小程序: 朋友圈榜单执行入口
-"""
-import datetime
-import sys
-import os
-import time
-import argparse
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.feishu import Feishu
-from kanyikan.kanyikan_moment.kanyikan_moment import Moment
-
-
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    """
-    主函数入口
-    :param log_type: 日志命名: monent
-    :param crawler: 哪款爬虫: kanyikan
-    :param strategy: 爬虫策略: kanyikan_moment
-    :param machine: 爬虫运行机器,阿里云服务器: aliyun_hk / aliyun / macpro / macair / local
-    :param env: 正式环境: prod;测试环境: dev
-    :param oss_endpoint: 阿里云102服务器: inner ;其它: out
-    :return: None
-    """
-    while True:
-        if 1 >= datetime.datetime.now().hour >= 0:
-            pass
-        else:
-            moment_video_list = Feishu.get_sheet_content(log_type, crawler, 'iK58HX')
-            for moment_video_id in moment_video_list:
-                Common.logger(log_type, crawler).info(f"开始抓取{moment_video_id}朋友圈推荐视频\n")
-                Moment.get_videos(log_type, crawler, strategy, oss_endpoint, env, machine, moment_video_id)
-
-            Common.del_logs(log_type, crawler)
-            Common.logger(log_type, crawler).info("抓取完一轮,休眠 10 秒\n")
-            time.sleep(10)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--strategy')  ## 添加参数
-    parser.add_argument('--our_uid')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    parser.add_argument('--machine')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    # print(args)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)

+ 0 - 3
kanyikan/kanyikan_main/run_kanyikan_recommend.py

@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/1/31

+ 0 - 3
kanyikan/kanyikan_moment/__init__.py

@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/1/31

+ 0 - 290
kanyikan/kanyikan_moment/kanyikan_moment.py

@@ -1,290 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/1/31
-"""
-看一看+小程序: 朋友圈榜单
-"""
-import time
-import requests
-import urllib3
-from common.common import Common
-from common.feishu import Feishu
-from common.publish import Publish
-proxies = {"http": None, "https": None}
-
-
-class Moment:
-    # 抓取基础规则
-    @staticmethod
-    def download_rule(video_dict):
-        """
-        抓取基础规则
-        """
-        if int(float(video_dict['duration'])) >= 60:
-            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
-                if int(video_dict['play_cnt']) >= 100000:
-                    if int(video_dict['like_cnt']) >= 0:
-                        if int(video_dict['share_cnt']) >= 0:
-                            return True
-                        else:
-                            return False
-                    else:
-                        return False
-                else:
-                    return False
-            return False
-        return False
-
-    # 获取推荐视频列表
-    @classmethod
-    def get_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine, moment_video_id):
-        url = "https://search.weixin.qq.com/cgi-bin/recwxa/snsgetvideoinfo?"
-        headers = {
-            "content-type": "application/json",
-            "Accept-Encoding": "gzip,compress,br,deflate",
-            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)"
-                          " AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148"
-                          " MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN",
-            "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/236/page-frame.html"
-        }
-        # videoid = random.choice(Feishu.get_sheet_content(log_type, crawler, 'iK58HX'))
-        params = {
-            "vid": moment_video_id,
-            "openid": "1924336296754305",
-            "model": "iPhone 11<iPhone12,1>14.7.1",
-            "sharesearchid": "8406805193800900989",
-            "shareOpenid": "oh_m45YffSEGxvDH--6s6g9ZkPxg",
-        }
-        try:
-            urllib3.disable_warnings()
-            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
-            if r.status_code != 200:
-                Common.logger(log_type, crawler).warning(f"response.status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"response.text:{r.text}\n")
-            elif r.json()["errcode"] != 0:
-                Common.logger(log_type, crawler).warning(f"msg:{r.json()['msg']}\n")
-            elif "rec_video_list" not in r.json()["data"]:
-                Common.logger(log_type, crawler).warning(f"该视频没有推荐列表\n")
-            else:
-                feeds = r.json()["data"]["rec_video_list"]
-                for i in range(len(feeds)):
-                    # video_id
-                    if "vid" in feeds[i]:
-                        video_id = feeds[i]["vid"]
-                    else:
-                        video_id = 0
-
-                    # video_title
-                    if "title" in feeds[i]:
-                        video_title = feeds[i]["title"].strip().replace("\n", "") \
-                                .replace("/", "").replace("\\", "").replace("\r", "") \
-                                .replace(":", "").replace("*", "").replace("?", "") \
-                                .replace("?", "").replace('"', "").replace("<", "") \
-                                .replace(">", "").replace("|", "").replace(" ", "") \
-                                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
-                                .replace("小年糕", "").replace("#", "").replace("Merge", "")
-                    else:
-                        video_title = 0
-
-                    # video_play_cnt
-                    if "played_cnt" in feeds[i]:
-                        video_play_cnt = feeds[i]["played_cnt"]
-                    else:
-                        video_play_cnt = 0
-
-                    # video_comment_cnt
-                    if "comment_cnt" in feeds[i]:
-                        video_comment_cnt = feeds[i]["comment_cnt"]
-                    else:
-                        video_comment_cnt = 0
-
-                    # video_liked_cnt
-                    if "liked_cnt" in feeds[i]:
-                        video_liked_cnt = feeds[i]["liked_cnt"]
-                    else:
-                        video_liked_cnt = 0
-
-                    # video_share_cnt
-                    if "shared_cnt" in feeds[i]:
-                        video_share_cnt = feeds[i]["shared_cnt"]
-                    else:
-                        video_share_cnt = 0
-
-                    # video_duration
-                    if "duration" in feeds[i]:
-                        video_duration = feeds[i]["duration"]
-                    else:
-                        video_duration = 0
-
-                    # video_width / video_height
-                    if "width" in feeds[i] or "height" in feeds[i]:
-                        video_width = feeds[i]["width"]
-                        video_height = feeds[i]["height"]
-                    else:
-                        video_width = 0
-                        video_height = 0
-
-                    # video_send_time
-                    if "upload_time" in feeds[i]:
-                        publish_time = feeds[i]["upload_time"]
-                    else:
-                        publish_time = 0
-
-                    # user_name
-                    if "user_info" not in feeds[i]:
-                        user_name = 0
-                    elif "nickname" not in feeds[i]["user_info"]:
-                        user_name = 0
-                    else:
-                        user_name = feeds[i]["user_info"]["nickname"].strip().replace("\n", "")
-
-                    # user_id
-                    if "user_info" not in feeds[i]:
-                        user_id = 0
-                    elif "openid" not in feeds[i]["user_info"]:
-                        user_id = 0
-                    else:
-                        user_id = feeds[i]["user_info"]["openid"]
-
-                    # head_url
-                    if "user_info" not in feeds[i]:
-                        avatar_url = 0
-                    elif "headimg_url" not in feeds[i]["user_info"]:
-                        avatar_url = 0
-                    else:
-                        avatar_url = feeds[i]["user_info"]["headimg_url"]
-
-                    # cover_url
-                    if "cover_url" not in feeds[i]:
-                        cover_url = 0
-                    else:
-                        cover_url = feeds[i]["cover_url"]
-
-                    # video_url
-                    if "play_info" not in feeds[i]:
-                        video_url = 0
-                    elif "items" not in feeds[i]["play_info"]:
-                        video_url = 0
-                    else:
-                        video_url = feeds[i]["play_info"]["items"][-1]["play_url"]
-
-                    video_dict = {
-                        'video_id': video_id,
-                        'video_title': video_title,
-                        'duration': video_duration,
-                        'play_cnt': video_play_cnt,
-                        'comment_cnt': video_comment_cnt,
-                        'like_cnt': video_liked_cnt,
-                        'share_cnt': video_share_cnt,
-                        'video_width': video_width,
-                        'video_height': video_height,
-                        'publish_time': publish_time,
-                        'user_name': user_name,
-                        'user_id': user_id,
-                        'avatar_url': avatar_url,
-                        'video_url': video_url,
-                        'cover_url': cover_url,
-                        'session': f'kanyikan_moment_{int(time.time())}',
-                    }
-                    Common.logger(log_type, crawler).info("video_title:{}", video_title)
-                    Common.logger(log_type, crawler).info("video_play_cnt:{}", video_play_cnt)
-                    Common.logger(log_type, crawler).info("video_duration:{}", video_duration)
-                    Common.logger(log_type, crawler).info("video_url:{}", video_url)
-
-                    # 过滤无效视频
-                    if video_id == 0 or video_title == 0 or video_duration == 0 or publish_time == 0 or user_id == 0\
-                            or avatar_url == 0 or cover_url == 0 or video_url == 0:
-                        Common.logger(log_type, crawler).warning("无效视频\n")
-                    # 抓取基础规则
-                    elif cls.download_rule(video_dict) is False:
-                        Common.logger(log_type, crawler).info("不满足基础规则\n")
-                    elif int(publish_time) < 1659283200:
-                        Common.logger(log_type, crawler).info(f'发布时间{time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time))} < 2022-08-01\n')
-                    # 过滤词库
-                    elif any(word if word in video_title else False for word in Feishu.get_sheet_content(log_type, crawler, 'rofdM5')) is True:
-                        Common.logger(log_type, crawler).info("视频已中过滤词\n")
-                    # 已下载视频表去重
-                    elif video_id in [j for m in Feishu.get_values_batch(log_type, crawler, "20ce0c") for j in m]:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    else:
-                        cls.download_publish(log_type, crawler, strategy, oss_endpoint, env, video_dict)
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")
-
-    # 下载/上传视频
-    @classmethod
-    def download_publish(cls, log_type, crawler, strategy, oss_endpoint, env, video_dict):
-        try:
-            # 过滤空行及空标题视频
-            if video_dict['video_id'] == 0 \
-                    or video_dict['video_title'] == 0\
-                    or video_dict['video_url'] == 0:
-                Common.logger(log_type, crawler).info("无效视频\n")
-            # # 视频的抓取时间小于 2 天
-            # elif int(time.time()) - v_push_time > 172800:
-            #     Common.logger(log_type, crawler).info("抓取时间超过2天:{}", video_dict['video_title'])
-            #     # 删除行或列,可选 ROWS、COLUMNS
-            #     Feishu.dimension_range("tGqZMX", "ROWS", i + 1, i + 1)
-            #     return
-            # 视频发布时间不小于 2021-06-01 00:00:00
-            elif video_dict['publish_time'] < 1622476800:
-                Common.logger(log_type, crawler).info(f'发布时间{time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(video_dict["publish_time"]))} < 2021-06-01\n')
-            else:
-                # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
-                                       title=video_dict['video_title'], url=video_dict['cover_url'])
-                # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                                       title=video_dict['video_title'], url=video_dict['video_url'])
-                # 保存视频信息至 "./{crawler}/videos/{video_dict['video_title']}/info.txt"
-                Common.save_video_info(log_type, crawler, video_dict)
-
-                # 上传视频
-                Common.logger(log_type, crawler).info(f"开始上传视频:{video_dict['video_title']}")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          oss_endpoint=oss_endpoint,
-                                                          our_uid="kanyikan_moment",
-                                                          env=env)
-                if env == 'dev':
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成:{}", video_dict['video_title'])
-
-                # 保存视频 ID 到云文档
-                Common.logger(log_type, crawler).info(f"保存视频ID至云文档:{video_dict['video_title']}")
-                # 视频ID工作表,插入首行
-                Feishu.insert_columns(log_type, crawler, "20ce0c", "ROWS", 1, 2)
-                # 视频ID工作表,首行写入数据
-                upload_time = int(time.time())
-                values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
-                           "朋友圈",
-                           str(video_dict['video_id']),
-                           str(video_dict['video_title']),
-                           our_video_link,
-                           video_dict['play_cnt'],
-                           video_dict['comment_cnt'],
-                           video_dict['like_cnt'],
-                           video_dict['share_cnt'],
-                           video_dict['duration'],
-                           f"{video_dict['video_width']}*{video_dict['video_height']}",
-                           time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(video_dict["publish_time"])),
-                           video_dict['user_name'],
-                           video_dict['user_id'],
-                           video_dict['head_url'],
-                           video_dict['cover_url'],
-                           video_dict['video_url']
-                           ]]
-                time.sleep(1)
-                Feishu.update_values(log_type, crawler, "20ce0c", "F2:W2", values)
-                Common.logger(log_type, crawler).info('下载/上传成功\n')
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
-
-
-if __name__ == "__main__":
-    kanyikan_moment = Moment()
-
-    pass

+ 0 - 3
kanyikan/kanyikan_recommend/kanyikan_recommend.py

@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/1/31

BIN
kanyikan/videos/.DS_Store


The file diff has been suppressed because it is too large
+ 0 - 0
weixinzhishu/weixinzhishu_chlsfiles/charles202303131913.txt


+ 5 - 0
weixinzhishu/weixinzhishu_key/search_key_mac.py

@@ -1,6 +1,11 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/2/20
+"""
+安装 atomac:
+1.翻墙
+2.pip3 install git+https://github.com/pyatom/pyatom/
+"""
 import json
 import os
 import sys

+ 2 - 0
weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py

@@ -144,6 +144,8 @@ class Test:
                         Common.logger(log_type, crawler).info("写入飞书成功\n")
                 break
 
+        Feishu.bot(log_type, "weixinzhishu_inner_long", "微信指数_站内长期指数抓取完毕")
+
 
 if __name__ == "__main__":
     Test.get_score_test("inner-long", "weixinzhishu")

+ 2 - 0
weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py

@@ -144,6 +144,8 @@ class Test:
                         Common.logger(log_type, crawler).info("写入飞书成功\n")
                 break
 
+        Feishu.bot(log_type, "weixinzhishu_inner_sort", "微信指数_站内短期指数抓取完毕")
+
 
 if __name__ == "__main__":
     Test.get_score_test("inner-sort", "weixinzhishu")

+ 2 - 0
weixinzhishu/weixinzhishu_main/weixinzhishu_out.py

@@ -144,6 +144,8 @@ class Test:
                         Common.logger(log_type, crawler).info("写入飞书成功\n")
                 break
 
+        Feishu.bot(log_type, "weixinzhishu_out", "微信指数_站外指数抓取完毕")
+
 
 if __name__ == "__main__":
     # print(Test.get_words("test", "weixinzhishu"))
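
The three weixinzhishu scripts now end a run by posting a Feishu notification; the crawler key in the call selects the sheet URL and @-mention list in the branches this commit adds to Feishu.bot. A usage sketch mirroring the test call at the bottom of feishu.py (the log_type value here is illustrative):

```python
from common.feishu import Feishu

# "weixinzhishu_out" routes to the 站外指数 sheet and @-mentions wangkun and muxinyi,
# per the branch added to Feishu.bot in this commit. The first argument is illustrative.
Feishu.bot("out", "weixinzhishu_out", "微信指数_站外指数抓取完毕")
```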

BIN
weixinzhishu/logs/.DS_Store → xiaoniangao/.DS_Store


+ 1 - 1
kanyikan/kanyikan_recommend/__init__.py → xiaoniangao/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/1/31
+# @Time: 2023/3/13

+ 0 - 0
kanyikan/logs/.DS_Store → xiaoniangao/logs/.DS_Store


+ 1 - 1
kanyikan/__init__.py → xiaoniangao/xiaoniangao_follow/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/1/31
+# @Time: 2023/3/13

+ 118 - 0
xiaoniangao/xiaoniangao_follow/insert_video.py

@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/14
+import json
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db  import MysqlHelper
+from common.feishu import Feishu
+
+
+class Insert:
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env, machine):
+        # xiaoniangao_sheetid_list = ['bkIrcr']  # follow
+        xiaoniangao_sheetid_list = ['InCA1I']  # hour
+        for sheetid in xiaoniangao_sheetid_list:
+            xiaoniangao_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            for i in range(1, len(xiaoniangao_sheet)):
+            # for i in range(1, 3):
+                if xiaoniangao_sheet[i][5] is None or xiaoniangao_sheet[i][9] is None:
+                    continue
+                video_id = xiaoniangao_sheet[i][9].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace("/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                out_user_id = str(xiaoniangao_sheet[i][19])
+                platform = "小年糕"
+                strategy = "定向爬虫策略"
+                out_video_id = str(xiaoniangao_sheet[i][7])
+                video_title = str(xiaoniangao_sheet[i][8])
+                cover_url = str(xiaoniangao_sheet[i][21])
+                video_url = str(xiaoniangao_sheet[i][22])
+                duration = int(xiaoniangao_sheet[i][14])
+                publish_time = str(xiaoniangao_sheet[i][16]).replace("/", "-")
+                play_cnt = int(xiaoniangao_sheet[i][10])
+                like_cnt = int(xiaoniangao_sheet[i][12])
+                share_cnt = int(xiaoniangao_sheet[i][13])
+                # collection_cnt = 0
+                comment_cnt = int(xiaoniangao_sheet[i][11])
+                crawler_rule = json.dumps({"play_cnt": {"min": 500}, "duration": {"min": 40}, "publish_day": {"min": 3}})
+                width = int(xiaoniangao_sheet[i][15].split("*")[0])
+                height = int(xiaoniangao_sheet[i][15].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"user_id:{user_id}, type:{type(user_id)}")
+                # print(f"out_user_id:{out_user_id}, type:{type(out_user_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"publish_time:{publish_time}, type:{type(publish_time)}")
+                # print(f"play_cnt:{play_cnt}, type:{type(play_cnt)}")
+                # print(f"like_cnt:{like_cnt}, type:{type(like_cnt)}")
+                # print(f"share_cnt:{share_cnt}, type:{type(share_cnt)}")
+                # print(f"comment_cnt:{comment_cnt}, type:{type(comment_cnt)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env, machine)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                else:
+                    # 视频信息保存数据库
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                    out_user_id,
+                                    platform,
+                                    strategy,
+                                    out_video_id,
+                                    video_title,
+                                    cover_url,
+                                    video_url,
+                                    duration,
+                                    publish_time,
+                                    play_cnt,
+                                    like_cnt,
+                                    share_cnt,
+                                    comment_cnt,
+                                    crawler_rule,
+                                    width,
+                                    height)
+                                    values({video_id},
+                                    "{out_user_id}",
+                                    "{platform}",
+                                    "{strategy}",
+                                    "{out_video_id}",
+                                    "{video_title}",
+                                    "{cover_url}",
+                                    "{video_url}",
+                                    {duration},
+                                    "{publish_time}",
+                                    {play_cnt},
+                                    {like_cnt},
+                                    {share_cnt},
+                                    {comment_cnt},
+                                    '{crawler_rule}',
+                                    {width},
+                                    {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
+if __name__ == "__main__":
+    # Insert.insert_video_from_feishu_to_mysql("insert-dev-follow", "xiaoniangao", "dev", "local")
+    # Insert.insert_video_from_feishu_to_mysql("insert-dev-hour", "xiaoniangao", "dev", "local")
+    # Insert.insert_video_from_feishu_to_mysql("insert-prod-follow", "xiaoniangao", "prod", "local")
+    Insert.insert_video_from_feishu_to_mysql("insert-prod-hour", "xiaoniangao", "prod", "local")
+    pass
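
insert_video.py builds its SQL with f-strings, so a title containing a quote character could break the statement. A hedged sketch of the same dedupe-then-insert flow with placeholders, assuming a pymysql-style connection (the repo's MysqlHelper wrapper is not shown in this diff):

```python
# Sketch only, assuming a pymysql-style connection object; the column subset is
# illustrative, taken from the insert statement above.
import pymysql

def insert_if_new(conn: "pymysql.connections.Connection", row: dict) -> None:
    with conn.cursor() as cursor:
        # Dedupe check, mirroring the select on platform + out_video_id above.
        cursor.execute(
            "select 1 from crawler_video where platform=%s and out_video_id=%s",
            (row["platform"], row["out_video_id"]),
        )
        if cursor.fetchone():
            return  # already in the database, mirror the "已存在数据库中" branch
        cursor.execute(
            "insert into crawler_video (video_id, platform, out_video_id, video_title) "
            "values (%s, %s, %s, %s)",
            (row["video_id"], row["platform"], row["out_video_id"], row["video_title"]),
        )
    conn.commit()
```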

+ 468 - 0
xiaoniangao/xiaoniangao_follow/xiaoniangao_follow.py

@@ -0,0 +1,468 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/13
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.publish import Publish
+from common.feishu import Feishu
+proxies = {"http": None, "https": None}
+
+
+class XiaoniangaoFollow:
+    platform = "小年糕"
+    # 小程序个人主页视频列表翻页参数
+    next_t = None
+    # 配置微信
+    # wechat_sheet = Feishu.get_values_batch("follow", "xiaoniangao", "dzcWHw")
+    # follow_x_mid = wechat_sheet[2][3]
+    # follow_x_token_id = wechat_sheet[3][3]
+    # follow_referer = wechat_sheet[4][3]
+    # follow_uid = wechat_sheet[5][3]
+    # follow_token = wechat_sheet[6][3]
+
+    # 过滤敏感词
+    @classmethod
+    def filter_words(cls, log_type):
+        # 敏感词库列表
+        word_list = []
+        # 从云文档读取所有敏感词,添加到词库列表
+        lists = Feishu.get_values_batch(log_type, "xiaoniangao", "DRAnZh")
+        for i in lists:
+            for j in i:
+                # 过滤空的单元格内容
+                if j is None:
+                    pass
+                else:
+                    word_list.append(j)
+        return word_list
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(video_dict):
+        """
+        下载视频的基本规则
+        :param video_dict: 视频信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        # 视频时长
+        if int(float(video_dict['duration'])) >= 40:
+            # 宽或高
+            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
+                # 播放量
+                if int(video_dict['play_cnt']) >= 500:
+                    # 分享量
+                    if int(video_dict['share_cnt']) >= 0:
+                        return True
+                    else:
+                        return False
+                else:
+                    return False
+            else:
+                return False
+        else:
+            return False
+
+    # 从云文档获取关注用户列表
+    @classmethod
+    def get_users(cls, log_type, crawler):
+        try:
+            while True:
+                follow_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "oNpThi")
+                if follow_sheet is None:
+                    time.sleep(1)
+                    continue
+                if len(follow_sheet) == 1:
+                    Common.logger(log_type, crawler).info("暂无定向爬取账号")
+                else:
+                    user_list = []
+                    for i in range(1, len(follow_sheet)):
+                        profile_id = follow_sheet[i][0]
+                        profile_mid = follow_sheet[i][1]
+                        user_name = follow_sheet[i][2]
+                        user_dict = {
+                            "profile_id": profile_id,
+                            "profile_mid": profile_mid,
+                            "user_name": user_name,
+                        }
+                        user_list.append(user_dict)
+                    return user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error("从云文档获取关注用户列表异常:{}", e)
+
+    # 获取个人主页视频
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, p_mid, oss_endpoint, env, machine):
+        try:
+            while True:
+                url = "https://api.xiaoniangao.cn/profile/list_album"
+                headers = {
+                    # "X-Mid": str(cls.follow_x_mid),
+                    "X-Mid": '1fb47aa7a860d9',
+                    # "X-Token-Id": str(cls.follow_x_token_id),
+                    "X-Token-Id": '9f2cb91f9952c107ecb73642083e1dec-1145266232',
+                    "content-type": "application/json",
+                    # "uuid": str(cls.follow_uid),
+                    "uuid": 'f40c2e7c-3cfb-4804-b513-608c0280268c',
+                    "Accept-Encoding": "gzip,compress,br,deflate",
+                    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)"
+                                  " AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
+                                  "MicroMessenger/8.0.20(0x18001435) NetType/WIFI Language/zh_CN",
+                    # "Referer": str(cls.follow_referer)
+                    "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/654/page-frame.html'
+                }
+                json_text = {
+                    "visited_mid": p_mid,
+                    "start_t": cls.next_t,
+                    "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!690x385r/crop/690x385/interlace/1/format/jpg",
+                    "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!120x120r/crop/120x120/interlace/1/format/jpg",
+                    "limit": 20,
+                    # "token": str(cls.follow_token),
+                    "token": '54e4c603f7bf3dc009c86b49ed91be36',
+                    # "uid": str(cls.follow_uid),
+                    "uid": 'f40c2e7c-3cfb-4804-b513-608c0280268c',
+                    "proj": "ma",
+                    "wx_ver": "8.0.23",
+                    "code_ver": "3.68.0",
+                    "log_common_params": {
+                        "e": [{
+                            "data": {
+                                "page": "profilePage",
+                                "topic": "public"
+                            }
+                        }],
+                        "ext": {
+                            "brand": "iPhone",
+                            "device": "iPhone 11",
+                            "os": "iOS 14.7.1",
+                            "weixinver": "8.0.23",
+                            "srcver": "2.24.7",
+                            "net": "wifi",
+                            "scene": "1089"
+                        },
+                        "pj": "1",
+                        "pf": "2",
+                        "session_id": "7468cf52-00ea-432e-8505-6ea3ad7ec164"
+                    }
+                }
+                urllib3.disable_warnings()
+                r = requests.post(url=url, headers=headers, json=json_text, proxies=proxies, verify=False)
+                if 'data' not in r.text or r.status_code != 200:
+                    Common.logger(log_type, crawler).info(f"get_videoList:{r.text}\n")
+                    cls.next_t = None
+                    return
+                elif 'list' not in r.json()['data']:
+                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
+                    cls.next_t = None
+                    return
+                elif len(r.json()['data']['list']) == 0:
+                    Common.logger(log_type, crawler).info(f"没有更多数据啦~\n")
+                    cls.next_t = None
+                    return
+                else:
+                    cls.next_t = r.json()["data"]["next_t"]
+                    feeds = r.json()["data"]["list"]
+                    for i in range(len(feeds)):
+                        # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
+                        char_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "BhlbST")
+                        expression_list = []
+                        char_list = []
+                        for q in range(len(char_sheet)):
+                            if char_sheet[q][0] is not None:
+                                expression_list.append(char_sheet[q][0])
+                            if char_sheet[q][1] is not None:
+                                char_list.append(char_sheet[q][1])
+                        befor_video_title = feeds[i]["title"].strip().replace("\n", "") \
+                            .replace("/", "").replace("\r", "").replace("#", "") \
+                            .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "")
+                        expression = random.choice(expression_list)
+                        expression_title_list = [expression + befor_video_title, befor_video_title + expression]
+                        # 标题,表情随机加在片头
+                        title_list1 = random.choice(expression_title_list)
+                        # 标题,表情随机加在片尾
+                        title_list2 = befor_video_title + random.choice(char_list)
+                        # # 替代句子中间的标点符号
+                        # title_list3 = befor_video_title.replace(
+                        #     ",", random.choice(expression_list)).replace(",", random.choice(expression_list))
+                        title_list4 = [title_list1, title_list2]
+                        video_title = random.choice(title_list4)
+
+                        # 用户名
+                        user_name = feeds[i]["album_user"]["nick"].strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+
+                        # 视频 ID
+                        if "vid" in feeds[i]:
+                            video_id = feeds[i]["vid"]
+                        else:
+                            video_id = 0
+
+                        # 播放量
+                        if "play_pv" in feeds[i]:
+                            video_play_cnt = feeds[i]["play_pv"]
+                        else:
+                            video_play_cnt = 0
+
+                        # 点赞
+                        if "total" in feeds[i]["favor"]:
+                            video_like_cnt = feeds[i]["favor"]["total"]
+                        else:
+                            video_like_cnt = 0
+
+                        # 评论数
+                        if "comment_count" in feeds[i]:
+                            video_comment_cnt = feeds[i]["comment_count"]
+                        else:
+                            video_comment_cnt = 0
+
+                        # 分享
+                        if "share" in feeds[i]:
+                            video_share_cnt = feeds[i]["share"]
+                        else:
+                            video_share_cnt = 0
+
+                        # 时长
+                        if "du" in feeds[i]:
+                            video_duration = int(feeds[i]["du"] / 1000)
+                        else:
+                            video_duration = 0
+
+                        # 发布时间
+                        if "t" in feeds[i]:
+                            publish_time_stamp = feeds[i]["t"] / 1000
+                        else:
+                            publish_time_stamp = 0
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                        # 宽和高
+                        if "w" in feeds[i] or "h" in feeds[i]:
+                            video_width = feeds[i]["w"]
+                            video_height = feeds[i]["h"]
+                        else:
+                            video_width = 0
+                            video_height = 0
+
+                        # 头像
+                        if "hurl" in feeds[i]["album_user"]:
+                            head_url = feeds[i]["album_user"]["hurl"]
+                        else:
+                            head_url = 0
+
+                        # 用户 ID
+                        if "id" in feeds[i]:
+                            profile_id = feeds[i]["id"]
+                        else:
+                            profile_id = 0
+
+                        # 用户 mid
+                        if "mid" in feeds[i]:
+                            profile_mid = feeds[i]["mid"]
+                        else:
+                            profile_mid = 0
+
+                        # 封面
+                        if "url" in feeds[i]:
+                            cover_url = feeds[i]["url"]
+                        else:
+                            cover_url = 0
+
+                        # 视频播放地址
+                        if "v_url" in feeds[i]:
+                            video_url = feeds[i]["v_url"]
+                        else:
+                            video_url = 0
+
+                        # 过滤无效视频
+                        if video_id == 0 \
+                                or video_title == 0 \
+                                or publish_time_stamp == 0 \
+                                or video_duration == 0 \
+                                or video_url == 0:
+                            Common.logger(log_type, crawler).info("无效视频\n")
+                        elif int(time.time()) - publish_time_stamp > 3600*24*3:
+                            Common.logger(log_type, crawler).info(f"发布时间超过3天:{publish_time_str}")
+                            cls.next_t = None
+                            return
+                        else:
+                            video_dict = {
+                                "video_id": video_id,
+                                "video_title": video_title,
+                                "duration": video_duration,
+                                "play_cnt": video_play_cnt,
+                                "like_cnt": video_like_cnt,
+                                "comment_cnt": video_comment_cnt,
+                                "share_cnt": video_share_cnt,
+                                "user_name": user_name,
+                                "publish_time_stamp": publish_time_stamp,
+                                "publish_time_str": publish_time_str,
+                                "video_width": video_width,
+                                "video_height": video_height,
+                                "avatar_url": head_url,
+                                "profile_id": profile_id,
+                                "profile_mid": profile_mid,
+                                "cover_url": cover_url,
+                                "video_url": video_url,
+                                "session": f"xiaoniangao-follow-{int(time.time())}"
+                            }
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 strategy=strategy,
+                                                 video_dict=video_dict,
+                                                 oss_endpoint=oss_endpoint,
+                                                 env=env,
+                                                 machine=machine)
+        except Exception as error:
+            Common.logger(log_type, crawler).error(f"获取个人主页视频异常:{error}\n")
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, oss_endpoint, env, machine):
+        try:
+            if cls.download_rule(video_dict) is False:
+                Common.logger(log_type, crawler).info("不满足基础门槛\n")
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            elif any(str(word) if str(word) in video_dict['video_title'] else False for word in cls.filter_words(log_type)) is True:
+                Common.logger(log_type, crawler).info("视频已中过滤词\n")
+            else:
+                # 下载封面
+                Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+                # 下载视频
+                Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+                # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # 上传视频
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid="follow",
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if env == "dev":
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return
+
+                # 视频信息保存数据库
+                rule_dict = {
+                    "duration": {"min": 40, "max": 100000000},
+                    "play_cnt": {"min": 500}
+                }
+
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['profile_id']}",
+                                                "{cls.platform}",
+                                                "定向爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+                # 视频写入飞书
+                Feishu.insert_columns(log_type, crawler, "Wu0CeL", "ROWS", 1, 2)
+                # 视频ID工作表,首行写入数据
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "用户主页",
+                           str(video_dict['video_id']),
+                           str(video_dict['video_title']),
+                           our_video_link,
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           str(video_dict['publish_time_str']),
+                           str(video_dict['user_name']),
+                           str(video_dict['profile_id']),
+                           str(video_dict['profile_mid']),
+                           str(video_dict['avatar_url']),
+                           str(video_dict['cover_url']),
+                           str(video_dict['video_url'])]]
+                time.sleep(1)
+                Feishu.update_values(log_type, crawler, "Wu0CeL", "F2:Z2", values)
+                Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
+
+        except Exception as e:
+            Common.logger(log_type, crawler).error("下载/上传异常:{}", e)
+            Feishu.dimension_range("person", "xiaoniangao", "k6ldje", "ROWS", 2, 2)
+
+    # 获取所有关注列表的用户视频
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        try:
+            # 已关注的用户列表 mids
+            user_list = cls.get_users(log_type, crawler)
+            for user in user_list:
+                user_name = user['user_name']
+                profile_mid = user['profile_mid']
+                Common.logger(log_type, crawler).info(f"获取 {user_name} 主页视频")
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  p_mid=profile_mid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env,
+                                  machine=machine)
+                cls.next_t = None
+                time.sleep(1)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+
+
+if __name__ == "__main__":
+    # print(XiaoniangaoFollow.repeat_video("follow", "xiaoniangao", "4919087666", "prod", "aliyun"))
+    print(XiaoniangaoFollow.repeat_video("follow", "xiaoniangao", "4919087666", "dev", "local"))
+    pass
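
download_rule above nests four if/else levels to enforce duration >= 40 s, non-negative width or height, play_cnt >= 500 and non-negative share_cnt; a minimal sketch of the same thresholds as a single boolean expression:

```python
# Sketch only: the thresholds from download_rule above, collapsed into one expression
# instead of nested if/else branches. Behaviour is unchanged.
def download_rule(video_dict: dict) -> bool:
    return (
        int(float(video_dict["duration"])) >= 40
        and (int(video_dict["video_width"]) >= 0 or int(video_dict["video_height"]) >= 0)
        and int(video_dict["play_cnt"]) >= 500
        and int(video_dict["share_cnt"]) >= 0
    )
```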

+ 1 - 1
kanyikan/kanyikan_main/__init__.py → xiaoniangao/xiaoniangao_main/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/1/31
+# @Time: 2023/3/13

+ 45 - 0
xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/13
+import argparse
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from xiaoniangao.xiaoniangao_follow.xiaoniangao_follow import XiaoniangaoFollow
+
+
+def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+    while True:
+        try:
+            Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
+            XiaoniangaoFollow.get_follow_videos(log_type=log_type,
+                                                crawler=crawler,
+                                                strategy=strategy,
+                                                oss_endpoint=oss_endpoint,
+                                                env=env,
+                                                machine=machine)
+            Common.del_logs(log_type, crawler)
+            Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+            time.sleep(60)
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"小年糕定向抓取异常:{e}\n")
+            # Feishu.bot(log_type, crawler, f"{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--strategy')  ## 添加参数
+    parser.add_argument('--oss_endpoint')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    parser.add_argument('--machine')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         strategy=args.strategy,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env,
+         machine=args.machine)

Some files were not shown because too many files have changed