wangkun 2 years ago
parent
commit
86046c9301
8 changed files with 481 additions and 49 deletions
  1. 35 25
      README.MD
  2. 4 3
      haokan.sh
  3. 37 9
      main/demo.py
  4. 5 6
      main/haokan_channel.py
  5. 365 0
      main/haokan_follow.py
  6. 2 0
      main/haokan_hot.py
  7. 8 6
      main/haokan_publish.py
  8. 25 0
      main/run_haokan_follow.py

+ 35 - 25
README.MD

@@ -1,34 +1,44 @@
-# 好看视频爬虫
-1. git: https://git.yishihui.com/Server/crawler_haokan.git
-2. feishu: https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=TaQXk3
+# 好看视频PC版爬虫
 
-# 软件架构
-1. python==3.10
-2. Appium_Python_Client==2.6.1
-3. loguru==0.6.0
-4. oss2==2.15.0
-5. psutil==5.9.2
-6. requests==2.27.1
-7. selenium==4.4.3
-8. urllib3==1.26.9
+#### 文档链接
+* [Git](https://git.yishihui.com/Server/crawler_haokan.git)
+* [Jenkins](https://jenkins-on.yishihui.com/view/%E7%A5%A8%E5%9C%88-%E7%88%AC%E8%99%AB/job/spider-%E5%A5%BD%E7%9C%8B%E8%A7%86%E9%A2%91/)
+* [好看爬虫表](https://w42nne6hzg.feishu.cn/sheets/shtcnaYz8Nhv8q6DbWtlL6rMEBd?sheet=TaQXk3)
+* [需求文档](https://w42nne6hzg.feishu.cn/docx/VKrEdhcbVoYLEbxC7GQc7dCCnt7)
+
+#### 软件架构
+* python==3.10
+* loguru==0.6.0
+* oss2==2.15.0
+* psutil==5.9.2
+* requests==2.27.1
+* selenium==4.4.3
+* urllib3==1.26.9
 
 #### 使用说明
-1. cd ./crawler_haokan
-2. python3 ./main/run_xx.py
+* cd ./crawler_haokan && sh haokan.sh
+* 或者,Jenkins 重新构建
+
+#### 更新记录
+2023/01/16
+* 新增:定向抓取榜单
+* 用户主页:发布时间<=30天
+* 分辨率 720P 以上
+* 视频时长>=60秒
+* 上传账号与站外用户一一对应
 
-#### 需求
 2022/12/01
-1. 新增:搞笑频道
-2. 新增:综艺频道
-3. 新增:生活频道
-4. 新增:美食频道
-5. 新增:三农频道
+* 新增:搞笑频道
+* 新增:综艺频道
+* 新增:生活频道
+* 新增:美食频道
+* 新增:三农频道
 
 2022/11/26 频道播放量榜
-1. 新增:首页频道
-2. 新增:音乐频道
+* 新增:首页频道
+* 新增:音乐频道
 
 2022/11/24 今日热播榜
-1. 运行时间: 每日13:00
-2. 热门列表 50 条,全抓
-3. 上传账号: 26117577
+* 运行时间: 每日13:00
+* 热门列表 50 条,全抓
+* 上传账号: 26117577

+ 4 - 3
haokan.sh

@@ -2,18 +2,19 @@
 echo "开始"
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在杀进程..."
 # shellcheck disable=SC2009
-# ps aux | grep run_haokan
 ps aux | grep run_haokan_hot.py | grep -v grep | awk '{print $2}' | xargs kill -9
-# shellcheck disable=SC2009
 ps aux | grep run_haokan_channel.py | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_haokan_follow.py | grep -v grep | awk '{print $2}' | xargs kill -9
 echo "$(date "+%Y-%m-%d %H:%M:%S") 进程已杀死!"
 
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在更新代码..."
 cd /data5/wangkun/crawler_haokan/ && git pull origin master --force
 echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
-source /etc/profile
+
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启服务..."
+source /etc/profile
 nohup python3 -u main/run_haokan_hot.py >>./nohup.log 2>&1 &
 nohup python3 -u main/run_haokan_channel.py >>./nohup.log 2>&1 &
+nohup python3 -u main/run_haokan_follow.py >>./nohup.log 2>&1 &
 echo "$(date "+%Y-%m-%d %H:%M:%S") 服务重启完毕!"
 exit 0

+ 37 - 9
main/demo.py

@@ -1,9 +1,9 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2022/11/23
+import datetime
 import time
 import requests
-from selenium.webdriver import DesiredCapabilities
 
 from main.common import Common
 from main.feishu_lib import Feishu
@@ -14,14 +14,33 @@ class Demo:
     def get_sheet(cls, log_type, crawler, sheetid):
         sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
         print(sheet)
+        # a = '2023/01/01 13:13:13'
+        # Feishu.update_values(log_type, crawler, sheetid, 'F2:F2', [[a]])
 
     @classmethod
-    def publish_time(cls):
-        time1 = '发布时间:2022年11月20日'
-        time2 = time1.replace('发布时间:', '').replace('年', '/').replace('月', '/').replace('日', '')
-        print(time2)
-        time3 = int(time.mktime(time.strptime(time2, "%Y/%m/%d")))
-        print(time3)
+    def publish_time(cls, publish_time):
+        today = datetime.date.today()
+        if '刚刚' in publish_time:
+            publish_time_stamp = int(time.time())
+        elif '分钟前' in publish_time:
+            publish_time_stamp = int(time.time()) - int(publish_time[0])*60
+        elif '小时前' in publish_time:
+            publish_time_stamp = int(time.time()) - int(publish_time[0])*3600
+        elif '昨天' in publish_time:
+            publish_time_str = (datetime.date.today() + datetime.timedelta(days=-1)).strftime("%Y/%m/%d")
+            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
+        elif '天前' in publish_time:
+            publish_time_str = today - datetime.timedelta(days=int(publish_time[0]))
+            publish_time_stamp = int(time.mktime(publish_time_str.timetuple()))
+        elif '年' in publish_time:
+            publish_time_str = publish_time.replace('年', '/').replace('月', '/').replace('日', '')
+            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
+        else:
+            publish_time_str = publish_time.replace('月', '/').replace('日', '')
+            this_year = datetime.datetime.now().year
+            publish_time_stamp = int(time.mktime(time.strptime(f"{this_year}/{publish_time_str}", "%Y/%m/%d")))
+
+        print(f'publish_time_stamp:{publish_time_stamp}')
 
     @classmethod
     def get_video_url(cls, log_type, video_id):
@@ -63,7 +82,7 @@ class Demo:
     user_cnt = []
 
     @classmethod
-    def get_follow_users(cls, log_type):
+    def get_follow_users(cls):
         while True:
             url = "https://www.kuaishou.com/graphql"
             payload = {
@@ -194,11 +213,20 @@ class Demo:
 
 
 if __name__ == '__main__':
-    # Demo.get_sheet('demo', 'haokan', '7f05d8')
+    Demo.get_sheet('demo', 'haokan', 'kVaSjf')
     # Demo.publish_time()
     # Demo.get_video_url('demo', '10377041690614321392')
     # Demo.get_follow_users('demo')
     # Demo.get_video_feeds('demo', '3xfr3gqnxmk92y2')
     # print(Feishu.get_values_batch('log_type', 'haokan', '5LksMx')[0][0])
     # print(type(Feishu.get_values_batch('log_type', 'haokan', '5LksMx')[0][0]))
+
+    # Demo.publish_time(publish_time='刚刚')
+    # Demo.publish_time(publish_time='1分钟前')
+    # Demo.publish_time(publish_time='1小时前')
+    # Demo.publish_time(publish_time='昨天')
+    # Demo.publish_time(publish_time='3天前')
+    # Demo.publish_time(publish_time='2022年01月10日')
+    # Demo.publish_time(publish_time='01月10日')
+
     pass

+ 5 - 6
main/haokan_channel.py

@@ -217,14 +217,13 @@ class Channel:
                 Common.logger(log_type).info('无效视频\n')
             elif cls.download_rule(video_dict['play_cnt'], video_dict['duration']) is False:
                 Common.logger(log_type).info('不满足抓取规则\n')
-            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(
-                    log_type, 'haokan', '5pWipX') for x in y]:
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '5pWipX') for x in y]:
                 Common.logger(log_type).info('视频已下载\n')
-            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(
-                    log_type, 'haokan', '7f05d8') for x in y]:
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '7f05d8') for x in y]:
                 Common.logger(log_type).info('视频已下载\n')
-            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(
-                    log_type, 'haokan', 'A5VCbq') for x in y]:
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'A5VCbq') for x in y]:
+                Common.logger(log_type).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'kVaSjf') for x in y]:
                 Common.logger(log_type).info('视频已下载\n')
             else:
                 # 下载

+ 365 - 0
main/haokan_follow.py

@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/1/13
+import datetime
+import os
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu_lib import Feishu
+from main.haokan_publish import Publish
+
+
+class Follow:
+    """Haokan Video "follow" (targeted-creator) crawler.
+
+    For every creator configured in a Feishu sheet, pages through the
+    creator's upload list on haokan.baidu.com, filters each video
+    (publish age, duration, resolution, banned words, de-dup against
+    Feishu "downloaded" sheets), then downloads and publishes the
+    survivors via Publish and records them back to Feishu.
+    """
+
+    # Pagination cursor for the listall API ('' = first page). Shared
+    # class-level state; reset to '' after each user in get_user_videos.
+    ctime = ''
+
+    @classmethod
+    def filter_words(cls, log_type):
+        """Return the banned-title-word list from Feishu sheet 'nKgHzp'.
+
+        Flattens all sheet cells into one list, skipping None cells.
+        On a Feishu error the exception is only logged, so the method
+        implicitly returns None — NOTE(review): download_publish iterates
+        this result, so a Feishu failure there raises TypeError (which is
+        then swallowed by download_publish's own broad except).
+        """
+        try:
+            filter_words_sheet = Feishu.get_values_batch(log_type, 'haokan', 'nKgHzp')
+            filter_words_list = []
+            for x in filter_words_sheet:
+                for y in x:
+                    if y is None:
+                        pass
+                    else:
+                        filter_words_list.append(y)
+            return filter_words_list
+        except Exception as e:
+            Common.logger(log_type).error(f'filter_words异常:{e}')
+
+    @classmethod
+    def get_users_from_feishu(cls, log_type):
+        """Read the creator roster from Feishu sheet 'x4nb7H'.
+
+        Returns {user_name: "<out_id>,<our_id>"} where out_id is the
+        Haokan author id (column 1) and our_id the in-house publishing
+        account id (column 3). Rows with any empty cell are skipped.
+        Returns None if the Feishu call raises (error is logged).
+        """
+        try:
+            user_sheet = Feishu.get_values_batch(log_type, 'haokan', 'x4nb7H')
+            user_dict = {}
+            # Start at 1: row 0 is assumed to be the header row.
+            for i in range(1, len(user_sheet)):
+                user_name = user_sheet[i][0]
+                out_id = user_sheet[i][1]
+                our_id = user_sheet[i][3]
+                if user_name is None or out_id is None or our_id is None:
+                    pass
+                else:
+                    user_dict[user_name] = str(out_id) + ',' + str(our_id)
+            return user_dict
+        except Exception as e:
+            Common.logger(log_type).error(f'get_users_from_feishu异常:{e}\n')
+
+    @classmethod
+    def follow_download_rule(cls, duration, width, height):
+        """Download rule: duration >= 60s AND at least one side >= 720px.
+
+        NOTE(review): never called anywhere in this file —
+        download_publish re-implements the same checks inline. Either
+        wire it up or remove it.
+        """
+        if int(duration) >= 60:
+            if int(width) >= 720 or int(height) >= 720:
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    @classmethod
+    def get_follow_feed(cls, log_type, out_id, our_id, user_name, env):
+        """Page through one creator's uploads and process each video.
+
+        Uses cls.ctime as the paging cursor between requests. Returns
+        (and clears the cursor) once a non-pinned video older than 30
+        days is seen; pinned ("is_top") old videos are skipped without
+        stopping the pagination.
+
+        NOTE(review): the '"errno":0,' error branch neither breaks nor
+        sleeps, so a persistently failing API spins this while-True
+        loop, hammering the endpoint.
+        NOTE(review): int(publish_time[0]) parses only the FIRST
+        character, so e.g. '12分钟前' is treated as 1 minute ago and
+        '15天前' as 1 day ago.
+        """
+        try:
+            while True:
+                url = 'https://haokan.baidu.com/web/author/listall?'
+                headers = {
+                    'Accept': '*/*',
+                    'Accept-Encoding': 'gzip, deflate',
+                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                    'Cache-Control': 'no-cache',
+                    'Connection': 'keep-alive',
+                    'Content-Type': 'application/x-www-form-urlencoded',
+                    # Session cookie is maintained manually in a Feishu cell.
+                    'Cookie': Feishu.get_values_batch(log_type, 'haokan', '5LksMx')[0][0],
+                    'Referer': 'https://haokan.baidu.com/author/'+str(out_id),
+                    'Pragma': 'no-cache',
+                    'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Microsoft Edge";v="108"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"macOS"',
+                    'Sec-Fetch-Dest': 'empty',
+                    'Sec-Fetch-Mode': 'cors',
+                    'Sec-Fetch-Site': 'same-origin',
+                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.76'
+                }
+                params = {
+                    'app_id': str(out_id),
+                    'ctime': cls.ctime,
+                    'rn': '10',
+                    'searchAfter': '',
+                    '_api': '1'
+                }
+                response = requests.get(url=url, headers=headers, params=params, verify=False)
+                if '"errno":0,' not in response.text:
+                    Common.logger(log_type).warning(f'get_follow_feed:{response.text}\n')
+                elif len(response.json()['data']['results']) == 0:
+                    Common.logger(log_type).info(f'get_follow_feed:{response.json()}\n')
+                    # NOTE(review): 0 here vs '' everywhere else — the
+                    # cursor sentinel is inconsistent, and the loop keeps
+                    # running with ctime=0 instead of returning.
+                    cls.ctime = 0
+                else:
+                    # Advance the paging cursor for the next request.
+                    cls.ctime = response.json()['data']['ctime']
+
+                    follow_feeds = response.json()['data']['results']
+                    for i in range(len(follow_feeds)):
+                        # video_title
+                        if 'title' not in follow_feeds[i]['content']:
+                            video_title = ''
+                        else:
+                            video_title = follow_feeds[i]['content']['title']
+
+                        # video_id
+                        if 'vid' not in follow_feeds[i]['content']:
+                            video_id = ''
+                        else:
+                            video_id = follow_feeds[i]['content']['vid']
+
+                        # is_top: whether the video is pinned on the author page
+                        if 'is_show_feature' not in follow_feeds[i]['content']:
+                            is_top = ''
+                        else:
+                            is_top = follow_feeds[i]['content']['is_show_feature']
+
+                        # play_cnt
+                        if 'playcnt' not in follow_feeds[i]['content']:
+                            play_cnt = ''
+                        else:
+                            play_cnt = follow_feeds[i]['content']['playcnt']
+
+                        # duration: 'mm:ss' string; duration_stamp: total seconds
+                        if 'duration' not in follow_feeds[i]['content']:
+                            duration = ''
+                            duration_stamp = ''
+                        else:
+                            duration = follow_feeds[i]['content']['duration']
+                            duration_stamp = int(duration.split(':')[0])*60 + int(duration.split(':')[-1])
+
+                        # publish_time: human-readable relative/absolute string
+                        if 'publish_time' not in follow_feeds[i]['content']:
+                            publish_time = ''
+                        else:
+                            publish_time = follow_feeds[i]['content']['publish_time']
+
+                        # publish_time_stamp: convert the display string to epoch
+                        # seconds ("刚刚"=just now, "分钟前"=minutes ago,
+                        # "小时前"=hours ago, "昨天"=yesterday, "天前"=days ago,
+                        # "年"=full Y/M/D date, else M/D of the current year).
+                        if '刚刚' in publish_time:
+                            publish_time_stamp = int(time.time())
+                        elif '分钟前' in publish_time:
+                            # NOTE(review): only the first digit is read.
+                            publish_time_stamp = int(time.time()) - int(publish_time[0]) * 60
+                        elif '小时前' in publish_time:
+                            # NOTE(review): only the first digit is read.
+                            publish_time_stamp = int(time.time()) - int(publish_time[0]) * 3600
+                        elif '昨天' in publish_time:
+                            publish_time_str = (datetime.date.today() + datetime.timedelta(days=-1)).strftime("%Y/%m/%d")
+                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
+                        elif '天前' in publish_time:
+                            today = datetime.date.today()
+                            # NOTE(review): only the first digit is read.
+                            publish_time_str = today - datetime.timedelta(days=int(publish_time[0]))
+                            publish_time_stamp = int(time.mktime(publish_time_str.timetuple()))
+                        elif '年' in publish_time:
+                            publish_time_str = publish_time.replace('年', '/').replace('月', '/').replace('日', '')
+                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
+                        else:
+                            publish_time_str = publish_time.replace('月', '/').replace('日', '')
+                            this_year = datetime.datetime.now().year
+                            publish_time_stamp = int(time.mktime(time.strptime(f"{this_year}/{publish_time_str}", "%Y/%m/%d")))
+
+                        # cover_url: first available of three possible keys
+                        if 'cover_src' in follow_feeds[i]['content']:
+                            cover_url = follow_feeds[i]['content']['cover_src']
+                        elif 'cover_src_pc' in follow_feeds[i]['content']:
+                            cover_url = follow_feeds[i]['content']['cover_src_pc']
+                        elif 'poster' in follow_feeds[i]['content']:
+                            cover_url = follow_feeds[i]['content']['poster']
+                        else:
+                            cover_url = ''
+
+                        # Pinned videos can appear out of chronological order, so
+                        # an old pinned video is skipped; an old NON-pinned video
+                        # means the rest of the feed is older too -> stop paging.
+                        if is_top is True and int(time.time()) - publish_time_stamp >= 3600*24*30:
+                            Common.logger(log_type).info(f'video_title:{video_title}')
+                            Common.logger(log_type).info(f'置顶视频,发布时间超过30天:{publish_time}\n')
+                        elif int(time.time()) - publish_time_stamp >= 3600*24*30:
+                            Common.logger(log_type).info(f'video_title:{video_title}')
+                            Common.logger(log_type).info(f'发布时间超过30天:{publish_time}\n')
+                            cls.ctime = ''
+                            return
+                        else:
+                            # NOTE(review): get_video_url returns None if its own
+                            # except fires; the subscripts below would then raise
+                            # TypeError, caught by this method's broad except.
+                            video_info_dict = cls.get_video_url(log_type, video_id)
+                            # video_url
+                            video_url = video_info_dict['video_url']
+                            # video_width
+                            video_width = video_info_dict['video_width']
+                            # video_height
+                            video_height = video_info_dict['video_height']
+
+                            Common.logger(log_type).info(f'video_title:{video_title}')
+                            # Common.logger(log_type).info(f'user_name:{user_name}')
+                            # Common.logger(log_type).info(f'out_id:{out_id}')
+                            # Common.logger(log_type).info(f'our_id:{our_id}')
+                            # Common.logger(log_type).info(f'duration_stamp:{duration_stamp}')
+                            Common.logger(log_type).info(f'duration:{duration}')
+                            Common.logger(log_type).info(f'video_width:{video_width}')
+                            Common.logger(log_type).info(f'video_height:{video_height}')
+                            Common.logger(log_type).info(f'publish_time:{publish_time}')
+                            Common.logger(log_type).info(f'video_url:{video_url}\n')
+
+                            video_dict = {
+                                'video_title': video_title,
+                                'video_id': video_id,
+                                'play_cnt': play_cnt,
+                                'duration': duration,
+                                'duration_stamp': duration_stamp,
+                                'publish_time': publish_time,
+                                'video_width': video_width,
+                                'video_height': video_height,
+                                'user_name': user_name,
+                                'cover_url': cover_url,
+                                'video_url': video_url
+                            }
+                            cls.download_publish(log_type, video_dict, our_id, env)
+        except Exception as e:
+            Common.logger(log_type).error(f'get_follow_feed异常:{e}\n')
+
+    @classmethod
+    def get_video_url(cls, log_type, video_id):
+        """Resolve a vid to a playable URL plus pixel dimensions.
+
+        Queries haokan.hao123.com and picks the LAST clarityUrl entry —
+        presumably the highest clarity; TODO confirm ordering. Returns
+        {'video_url', 'video_width', 'video_height'} with '' values on
+        HTTP/API errors, or None if an exception escapes (logged).
+        """
+        try:
+            url = 'https://haokan.hao123.com/v?'
+            params = {
+                'vid': str(video_id),
+                '_format': 'json',
+            }
+            headers = {
+                'Accept': '*/*',
+                'Accept-Encoding': 'gzip, deflate, br',
+                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                'Cache-Control': 'no-cache',
+                'Connection': 'keep-alive',
+                'Content-Type': 'application/x-www-form-urlencoded',
+                # NOTE(review): hard-coded session cookie — will expire; the
+                # author-feed endpoint reads its cookie from Feishu instead.
+                'Cookie': 'PC_TAB_LOG=video_details_page; COMMON_LID=b0be69dd9fcae328d06935bd40f615cd; Hm_lvt_4aadd610dfd2f5972f1efee2653a2bc5=1669029953; hkpcvideolandquery=%u82CF%u5DDE%u6700%u5927%u7684%u4E8C%u624B%u8F66%u8D85%u5E02%uFF0C%u8F6C%u4E00%u8F6C%u91CC%u8FB9%u8C6A%u8F66%u592A%u591A%u4E86%uFF0C%u4EF7%u683C%u66F4%u8BA9%u6211%u5403%u60CA%uFF01; Hm_lpvt_4aadd610dfd2f5972f1efee2653a2bc5=1669875695; ariaDefaultTheme=undefined; reptileData=%7B%22data%22%3A%22636c55e0319da5169a60acec4a264a35c10862f8abfe2f2cc32c55eb6b0ab4de0efdfa115ea522d6d4d361dea07feae2831d3e2c16ed6b051c611ffe5aded6c9f852501759497b9fbd2132a2160e1e40e5845b41f78121ddcc3288bd077ae4e8%22%2C%22key_id%22%3A%2230%22%2C%22sign%22%3A%22f6752aac%22%7D; RT="z=1&dm=hao123.com&si=uc0q7wnm4w&ss=lb4otu71&sl=j&tt=av0&bcn=https%3A%2F%2Ffclog.baidu.com%2Flog%2Fweirwood%3Ftype%3Dperf&ld=1rdw&cl=7v6c"',
+                'Pragma': 'no-cache',
+                'Referer': 'https://haokan.hao123.com/v?vid=10623278258033022286&pd=pc&context=',
+                'sec-ch-ua': '"Microsoft Edge";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.62',
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            if r.status_code != 200:
+                video_url = ''
+                video_width = ''
+                video_height = ''
+                Common.logger(log_type).info(f'get_video_url_response:{r.text}')
+            elif r.json()['errno'] != 0 or len(r.json()['data']) == 0:
+                video_url = ''
+                video_width = ''
+                video_height = ''
+                Common.logger(log_type).info(f'get_video_url_response:{r.json()}')
+            else:
+                # vodVideoHW is a '$$'-separated pair: height before '$$',
+                # width after — NOTE(review): confirm field order ("HW"
+                # suggests height-then-width, as assumed here).
+                clarityUrl = r.json()['data']['apiData']['curVideoMeta']['clarityUrl']
+                video_url = r.json()['data']['apiData']['curVideoMeta']['clarityUrl'][len(clarityUrl) - 1]['url']
+                video_width = r.json()['data']['apiData']['curVideoMeta']['clarityUrl'][len(clarityUrl) - 1]['vodVideoHW'].split('$$')[-1]
+                video_height = r.json()['data']['apiData']['curVideoMeta']['clarityUrl'][len(clarityUrl) - 1]['vodVideoHW'].split('$$')[0]
+
+            video_info_dict = {
+                'video_url': video_url,
+                'video_width': video_width,
+                'video_height': video_height
+            }
+            return video_info_dict
+        except Exception as e:
+            Common.logger(log_type).error(f'get_video_url异常:{e}\n')
+
+    @classmethod
+    def download_publish(cls, log_type, video_dict, our_id, env):
+        """Filter one video, then download, publish and record it.
+
+        Skips: empty title/id/url; duration < 60s; resolution below 720
+        on both sides; titles containing a filter word; ids already in
+        any of the four Feishu "downloaded" sheets. Otherwise downloads
+        cover + video, writes info.txt, uploads via Publish, and appends
+        a row to sheet 'kVaSjf'.
+        """
+        try:
+            if video_dict['video_title'] == '' or video_dict['video_id'] == '' or video_dict['video_url'] == '':
+                Common.logger(log_type).info('无效视频\n')
+            elif int(video_dict['duration_stamp']) < 60:
+                # NOTE(review): video_dict["duration"] is an 'mm:ss' string
+                # (built in get_follow_feed), so int() in this f-string raises
+                # ValueError; the broad except below logs it and the video is
+                # silently skipped instead of logging this message.
+                Common.logger(log_type).info(f'时长:{int(video_dict["duration"])} < 60s\n')
+            elif int(video_dict['video_width']) < 720 or int(video_dict['video_height']) < 720:
+                Common.logger(log_type).info(f'{int(video_dict["video_width"])}*{int(video_dict["video_height"])} < 720P\n')
+            # True when any filter word occurs in the title.
+            elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type)) is True:
+                Common.logger(log_type).info('已中过滤词库\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '5pWipX') for x in y]:
+                Common.logger(log_type).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '7f05d8') for x in y]:
+                Common.logger(log_type).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'kVaSjf') for x in y]:
+                Common.logger(log_type).info('视频已下载\n')
+            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'A5VCbq') for x in y]:
+                Common.logger(log_type).info('视频已下载\n')
+            else:
+                # Download cover and video into ./videos/<title>/.
+                Common.download_method(log_type, 'cover', video_dict['video_title'], video_dict['cover_url'])
+                Common.download_method(log_type, 'video', video_dict['video_title'], video_dict['video_url'])
+                # Write the metadata file consumed by Publish. The '100000'
+                # lines and '1920*1080' are hard-coded placeholder counts and
+                # resolution, not real values.
+                with open("./videos/" + video_dict['video_title']
+                          + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
+                    f_a.write(str(video_dict['video_id']) + "\n" +
+                              str(video_dict['video_title']) + "\n" +
+                              str(video_dict['duration_stamp']) + "\n" +
+                              '100000' + "\n" +
+                              '100000' + "\n" +
+                              '100000' + "\n" +
+                              '100000' + "\n" +
+                              '1920*1080' + "\n" +
+                              str(int(time.time())) + "\n" +
+                              str(video_dict['user_name']) + "\n" +
+                              str(video_dict['cover_url']) + "\n" +
+                              str(video_dict['video_url']) + "\n" +
+                              str(video_dict['cover_url']) + "\n" +
+                              "HAOKAN" + str(int(time.time())))
+                Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
+
+                # Upload. Both branches call the same upload; only the
+                # admin-console link differs between dev and prod.
+                Common.logger(log_type).info(f"开始上传视频:{video_dict['video_title']}")
+                if env == 'dev':
+                    our_video_id = Publish.upload_and_publish(log_type, our_id, env)
+                    our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                else:
+                    our_video_id = Publish.upload_and_publish(log_type, our_id, env)
+                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                Common.logger(log_type).info(f"视频上传完成:{video_dict['video_title']}\n")
+
+                # Record the video in the Feishu "downloaded" sheet: insert a
+                # fresh row at the top of 'kVaSjf', then fill F2:Z2.
+                Common.logger(log_type).info(f"保存视频至已下载表:{video_dict['video_title']}")
+                Feishu.insert_columns(log_type, "haokan", "kVaSjf", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                           '定向榜',
+                           video_dict['video_title'],
+                           video_dict['video_id'],
+                           our_video_link,
+                           int(video_dict['play_cnt']),
+                           video_dict['duration'],
+                           video_dict['publish_time'],
+                           video_dict['video_width']+"*"+video_dict['video_height'],
+                           video_dict['user_name'],
+                           video_dict['cover_url'],
+                           video_dict['video_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, "haokan", "kVaSjf", "F2:Z2", values)
+                Common.logger(log_type).info(f"视频:{video_dict['video_title']},下载/上传成功\n")
+        except Exception as e:
+            Common.logger(log_type).error(f'download_publish异常:{e}\n')
+
+    @classmethod
+    def get_user_videos(cls, log_type, env):
+        """Entry point: crawl every configured creator sequentially.
+
+        Sleeps 30s between users and resets the shared paging cursor
+        after each one. NOTE(review): if get_users_from_feishu returned
+        None (Feishu error), len(None) raises here and is swallowed by
+        the broad except.
+        """
+        try:
+            user_dict = cls.get_users_from_feishu(log_type)
+            if len(user_dict) == 0:
+                Common.logger(log_type).warning('用户ID列表为空\n')
+            else:
+                for k, v in user_dict.items():
+                    user_name = k
+                    out_id = v.split(',')[0]
+                    our_id = v.split(',')[1]
+                    Common.logger(log_type).info(f'抓取{user_name}主页视频\n')
+                    cls.get_follow_feed(log_type, out_id, our_id, user_name, env)
+                    Common.logger(log_type).info('休眠 30 秒\n')
+                    time.sleep(30)
+                    cls.ctime = ''
+        except Exception as e:
+            Common.logger(log_type).error(f'get_user_videos异常:{e}\n')
+
+
+if __name__ == '__main__':
+    # Manual smoke test: print the configured creator mapping from Feishu.
+    print(Follow.get_users_from_feishu('follow'))
+    pass

+ 2 - 0
main/haokan_hot.py

@@ -182,6 +182,8 @@ class Hot:
             Common.logger(log_type).info('视频已下载\n')
         elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'A5VCbq') for x in y]:
             Common.logger(log_type).info('视频已下载\n')
+        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'kVaSjf') for x in y]:
+            Common.logger(log_type).info('视频已下载\n')
         else:
             # 下载
             Common.download_method(log_type, 'cover', video_dict['video_title'], video_dict['cover_url'])

+ 8 - 6
main/haokan_publish.py

@@ -34,11 +34,11 @@ class Publish:
         versionCode  版本 默认1
         :return:
         """
-        # Common.logger(log_type).info('publish request data: {}'.format(request_data))
+        Common.logger(log_type).info('publish request data: {}'.format(request_data))
         result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
-        # Common.logger(log_type).info('publish result: {}'.format(result))
+        Common.logger(log_type).info('publish result: {}'.format(result))
         video_id = result["data"]["id"]
-        # Common.logger(log_type).info('video_id: {}'.format(video_id))
+        Common.logger(log_type).info('video_id: {}'.format(video_id))
         if result['code'] != 0:
             Common.logger(log_type).error('pushlish failure msg = {}'.format(result['msg']))
         else:
@@ -98,7 +98,9 @@ class Publish:
     access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
     access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
     bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
+    # OSS 内网
     endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
+    # OSS 外网
     # endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
 
     # 确认上面的参数都填写正确了
@@ -161,10 +163,10 @@ class Publish:
         today = time.strftime("%Y%m%d", time.localtime())
         # videos 目录下的所有视频文件夹
         files = os.listdir(cls.local_file_path)
-        for f in files:
+        for fv in files:
             try:
                 # 单个视频文件夹
-                fi_d = os.path.join(cls.local_file_path, f)
+                fi_d = os.path.join(cls.local_file_path, fv)
                 # 确认为视频文件夹
                 if os.path.isdir(fi_d):
                     Common.logger(log_type).info('dir = {}'.format(fi_d))
@@ -257,5 +259,5 @@ class Publish:
                     Common.logger(log_type).error('file not a dir = {}'.format(fi_d))
             except Exception as e:
                 # 删除视频文件夹
-                shutil.rmtree("./videos/" + f + "/")
+                shutil.rmtree("./videos/" + fv + "/")
                 Common.logger(log_type).exception('upload_and_publish error', e)

+ 25 - 0
main/run_haokan_follow.py

@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/1/13
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.haokan_follow import Follow
+
+
+class Main:
+    """Scheduler wrapper for the Haokan "follow" crawler."""
+
+    @classmethod
+    def main(cls, log_type, env):
+        """Run forever: one full pass over all configured creators,
+        clean old logs, reset the shared paging cursor, sleep one hour.
+        """
+        while True:
+            Common.logger(log_type).info('开始抓取"好看视频定向榜"\n')
+            Follow.get_user_videos(log_type, env)
+            Common.del_logs(log_type)
+            # Defensive reset of the class-level pagination cursor
+            # between passes.
+            Follow.ctime = ''
+            Common.logger(log_type).info('休眠 1 小时\n')
+            time.sleep(3600)
+
+
+if __name__ == '__main__':
+    # Production entry point: log channel 'follow', publish to prod admin.
+    Main.main('follow', 'prod')