
Fully automated crawling

wangkun · 2 years ago
parent commit 00bff02a76
10 changed files with 556 additions and 57 deletions
  1. README.md (+36 −39)
  2. chlsfiles/charles202208041536.txt (+0 −0)
  3. main/auto.py (+38 −0)
  4. main/common.py (+7 −8)
  5. main/demo.py (+3 −3)
  6. main/feishu_lib.py (+1 −1)
  7. main/gzh.py (+427 −0)
  8. main/publish.py (+4 −4)
  9. main/run_gzh.py (+38 −0)
  10. main/run_gzh_recommend.py (+2 −2)

+ 36 - 39
README.md

@@ -1,41 +1,38 @@
+#****************************************************************************************************
+#
 # WeChat Official Accounts crawler
-
-git:https://git.yishihui.com/Server/crawler_gzh.git
-
-feishu:https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?
-
-loguru==0.6.0
-oss2==2.15.0
-requests==2.27.1
-urllib3==1.26.9
-python==3.10
-
-
-# Entry point:
-
-cd ./crawler
-
-python3 ./crawler_gzh/main/run_xxx.py
-
-
+# git:https://git.yishihui.com/Server/crawler_gzh.git
+# feishu:https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?
+#
+# Dependencies
+# loguru==0.6.0
+# oss2==2.15.0
+# requests==2.27.1
+# urllib3==1.26.9
+# python==3.10
+#
+# New entry point
+# cd ./crawler_gzh
+# python3 main/run_gzh.py
+#
+# Old entry point:
+# cd ./crawler
+# python3 ./crawler_gzh/main/run_gzh_recommend.py
+#
+#
 # Requirements
-
-2022/8/4 https://w42nne6hzg.feishu.cn/docx/doxcndwbtMudFHh7r4alaJoykke
-
-1. Task start time
-
-- Every day from 08:00 to 21:00
-
-2. Crawl rules:
-
-- Video duration longer than 1 minute and shorter than 20 minutes
-
-- On-site title = original video title from the official account
-
-- On-site cover = original cover image from the official account
-
-3. On-site intake:
-
-- Ingest 100 videos per day
-
-- Videos are randomly assigned to 5 virtual accounts. uid list:
+# 2022/8/17
+# 1. Crawl videos published after 2021 from each user's profile page
+# 2. Crawl 10 videos per user per day
+#
+# 2022/8/4 https://w42nne6hzg.feishu.cn/docx/doxcndwbtMudFHh7r4alaJoykke
+# 1. Task start time
+#    Every day from 08:00 to 21:00
+# 2. Crawl rules:
+#    Video duration longer than 1 minute and shorter than 20 minutes
+#    On-site title = original video title from the official account
+#    On-site cover = original cover image from the official account
+# 3. On-site intake:
+#    Ingest 100 videos per day
+#    Videos are randomly assigned to 5 virtual accounts. uid list: [20631273, 20631274, 20631275, 20631276, 20631277]
+#****************************************************************************************************
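
For reference, the 2022/8/4 rules above reduce to a small filter plus a random account assignment. A minimal illustrative sketch (the function names are hypothetical, not part of this repo):

    import random

    # uid list from the requirement above
    VIRTUAL_UIDS = [20631273, 20631274, 20631275, 20631276, 20631277]

    def matches_duration_rule(duration_seconds: int) -> bool:
        # Rule: longer than 1 minute, shorter than 20 minutes
        return 60 < duration_seconds < 20 * 60

    def assign_uid() -> int:
        # Spread the 100 daily ingested videos randomly across the 5 virtual accounts
        return random.choice(VIRTUAL_UIDS)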

Changes are not shown because the file is too large.
+ 0 - 0
chlsfiles/charles202208041536.txt


+ 38 - 0
main/auto.py

@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/15
+import atomacos
+# from atomacos import keyboard
+from time import sleep
+
+bundle_id = "com.tencent.xinWeChat"
+
+# Launch the app
+atomacos.launchAppByBundleId(bundle_id)
+atomator = atomacos.getAppRefByBundleId(bundle_id)
+sleep(3)
+
+# Get the current window
+wx = atomator.windows()[0]
+print(wx)
+
+# Get the input control
+lt = wx.findFirstR(AXRole="AXRadioButton", AXHelp="微信")
+print(lt)
+
+# Get position and size
+lt_position = lt.AXPosition
+lt_size = lt.AXSize
+# Note: AXPosition is the element's top-left corner; offset by half the size to click the element's center
+lt_click = (lt_position[0] + lt_size[0] / 2, lt_position[1] + lt_size[1] / 2)
+print(lt_click)
+lt.clickMouseButtonLeft(lt_click)
+
+# Type text (US keyboard characters)
+# s1 = lt.findFirstR(AXRole='AXTextArea', AXRoleDescription='文本输入区')
+s1 = lt.findFirstR(AXRole='AXTextArea', AXLabel='搜索')
+s1_p = s1.AXPosition
+s1_s = s1.AXSize
+s1.tripleClickMouse((s1_p[0] + s1_s[0] / 2, s1_p[1] + s1_s[1] / 2))
+s1.sendKeys('公众号')
+

+ 7 - 8
main/common.py

@@ -11,7 +11,6 @@ import os
 import time
 import requests
 import urllib3
-
 proxies = {"http": None, "https": None}
 
 
@@ -32,7 +31,7 @@ class Common:
        Use the logger module to generate logs
         """
        # Log file directory
-        log_dir = "./crawler_gzh/logs/"
+        log_dir = "./logs/"
         log_path = os.getcwd() + os.sep + log_dir
         if not os.path.isdir(log_path):
             os.makedirs(log_path)
@@ -63,7 +62,7 @@ class Common:
        :d_dir: the log directory to clean up
        :return: keep the 6 most recent log files
         """
-        logs_dir = "./crawler_gzh/logs/"
+        logs_dir = "./logs/"
         if not os.path.exists(logs_dir):
             os.mkdir(logs_dir)
 
@@ -79,7 +78,7 @@ class Common:
         else:
             for file in all_logs[:len(all_logs) - 6]:
                 os.remove(logs_dir + file)
-        cls.logger(log_type).info("清除冗余日志成功")
+        cls.logger(log_type).info("清除冗余日志成功\n")
 
     # Helper that downloads a video or cover image
     @classmethod
@@ -90,11 +89,11 @@ class Common:
        Video cover, or video playback URL: d_url
        Download save path: "./files/{d_title}/"
         """
-        videos_dir = "./crawler_gzh/videos/"
+        videos_dir = "./videos/"
         if not os.path.exists(videos_dir):
             os.mkdir(videos_dir)
        # First create a folder to hold this video's related files
-        video_dir = "./crawler_gzh/videos/" + d_name + "/"
+        video_dir = "./videos/" + d_name + "/"
         if not os.path.exists(video_dir):
             os.mkdir(video_dir)
 
@@ -114,7 +113,7 @@ class Common:
                         f.write(chunk)
                 cls.logger(log_type).info("==========视频下载完成==========")
             except Exception as e:
-                cls.logger(log_type).exception("视频下载失败:{}", e)
+                cls.logger(log_type).exception("视频下载失败:{}\n", e)
 
        # Download the cover
         elif text == "cover":
@@ -133,7 +132,7 @@ class Common:
                     f.write(response.content)
                 cls.logger(log_type).info("==========封面下载完成==========")
             except Exception as e:
-                cls.logger(log_type).exception("封面下载失败:{}", e)
+                cls.logger(log_type).exception("封面下载失败:{}\n", e)
 
 
 if __name__ == "__main__":
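
With the paths now relative ("./logs/", "./videos/"), the helpers in common.py are meant to run from the repo root. A usage sketch based on the signatures in this diff (the title and URL are placeholders):

    # Run from the repo root so "./logs/" and "./videos/" resolve correctly
    from main.common import Common

    Common.logger("gzh").info("crawler started")
    # text is "video" or "cover"; files are saved under "./videos/<d_name>/"
    Common.download_method(log_type="gzh", text="video",
                           d_name="some-title", d_url="https://example.com/video.mp4")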

Changes are not shown because the file is too large.
+ 3 - 3
main/demo.py


+ 1 - 1
main/feishu_lib.py

@@ -4,7 +4,7 @@
 import json
 import requests
 import urllib3
-from crawler_gzh.main.common import Common
+from main.common import Common
 proxies = {"http": None, "https": None}
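
The package-absolute import (crawler_gzh.main.common) becomes root-relative (main.common). This only resolves because the entry scripts append the working directory to sys.path before importing, so everything must be launched from the repo root (cd ./crawler_gzh), as run_gzh.py below does:

    import os
    import sys

    # Make "main.*" importable when started from the repo root
    sys.path.append(os.getcwd())
    from main.common import Common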
 
 

+ 427 - 0
main/gzh.py

@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/16
+import os
+import random
+import sys
+import time
+import ffmpeg
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu_lib import Feishu
+from main.publish import Publish
+
+
+class GZH:
+    # Pagination parameter for the article-list API
+    begin = 0
+    # Articles crawled for the current user
+    gzh_count = []
+
+    # Get width/height/duration info of an already-downloaded video
+    @classmethod
+    def get_video_info_from_local(cls, video_path):
+        probe = ffmpeg.probe(video_path)
+        # print('video_path: {}'.format(video_path))
+        # format1 = probe['format']
+        # bit_rate = int(format1['bit_rate']) / 1000
+        # duration = format['duration']
+        # size = int(format1['size']) / 1024 / 1024
+        video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
+        if video_stream is None:
+            print('No video stream found!')
+            return
+        width = int(video_stream['width'])
+        height = int(video_stream['height'])
+        # num_frames = int(video_stream['nb_frames'])
+        # fps = int(video_stream['r_frame_rate'].split('/')[0]) / int(video_stream['r_frame_rate'].split('/')[1])
+        duration = float(video_stream['duration'])
+        # print('width: {}'.format(width))
+        # print('height: {}'.format(height))
+        # print('num_frames: {}'.format(num_frames))
+        # print('bit_rate: {}k'.format(bit_rate))
+        # print('fps: {}'.format(fps))
+        # print('size: {}MB'.format(size))
+        # print('duration: {}'.format(duration))
+        return width, height, duration
+
+    # Get the cookie / token from the sheet
+    @classmethod
+    def get_cookie_token(cls, log_type, text):
+        try:
+            sheet = Feishu.get_values_batch(log_type, "gzh", "pxHL2C")
+            token = sheet[0][1]
+            cookie = sheet[1][1]
+            if text == "cookie":
+                return cookie
+            elif text == "token":
+                return token
+        except Exception as e:
+            Common.logger(log_type).error("get_cookie_token:{}\n", e)
+
+    # Search creator info by keyword and write it to the 电影票 sheet (do not modify)
+    @classmethod
+    def search_user_by_word(cls, log_type):
+        try:
+            sheet = Feishu.get_values_batch(log_type, "gzh", "pxHL2C")
+            for i in range(3, len(sheet)):
+                word = sheet[i][0]
+                index = sheet[i][1]
+                url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+                headers = {
+                    "accept": "*/*",
+                    "accept-encoding": "gzip, deflate, br",
+                    "accept-language": "zh-CN,zh;q=0.9",
+                    "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                               "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                               "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                    "sec-ch-ua-mobile": "?0",
+                    "sec-ch-ua-platform": '"Windows"',
+                    "sec-fetch-dest": "empty",
+                    "sec-fetch-mode": "cors",
+                    "sec-fetch-site": "same-origin",
+                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                                  " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                    "x-requested-with": "XMLHttpRequest",
+                    'cookie': cls.get_cookie_token(log_type, "cookie"),
+                }
+                params = {
+                    "action": "search_biz",
+                    "begin": "0",
+                    "count": "5",
+                    "query": word,
+                    "token": cls.get_cookie_token(log_type, "token"),
+                    "lang": "zh_CN",
+                    "f": "json",
+                    "ajax": "1",
+                }
+                urllib3.disable_warnings()
+                r = requests.get(url=url, headers=headers, params=params, verify=False)
+                if "list" not in r.json() or len(r.json()["list"]) == 0:
+                    Common.logger(log_type).warning("search_user_by_word:{}", r.text)
+                else:
+                    fakeid = r.json()["list"][int(index)-1]["fakeid"]
+                    head_url = r.json()["list"][int(index)-1]["round_head_img"]
+                    time.sleep(0.5)
+                    Common.logger(log_type).info("获取{}的fakeid成功", word)
+                    Feishu.update_values(log_type, 'gzh', 'pxHL2C', 'C'+str(i+1)+':C'+str(i+1), [[fakeid]])
+                    time.sleep(0.5)
+                    Common.logger(log_type).info("获取{}的头像成功", word)
+                    Feishu.update_values(log_type, 'gzh', 'pxHL2C', 'D'+str(i+1)+':D'+str(i+1), [[head_url]])
+            Common.logger(log_type).info("获取所有用户及ID信息完成\n")
+        except Exception as e:
+            Common.logger(log_type).error("search_user_by_word异常:{}\n", e)
+
+    # Get the video download URL
+    @classmethod
+    def get_url(cls, log_type, url):
+        try:
+            payload = {}
+            headers = {
+                'Cookie': 'rewardsn=; wxtokenkey=777'
+            }
+            urllib3.disable_warnings()
+            response = requests.get(url=url, headers=headers, data=payload, verify=False)
+            # print(response.text)
+            response_list = response.text.splitlines()
+            video_url_list = []
+            for m in response_list:
+                if "mpvideo.qpic.cn" in m:
+                    video_url = m.split("url: '")[1].split("',")[0].replace(r"\x26amp;", "&")
+                    video_url_list.append(video_url)
+            video_url = video_url_list[0]
+            return video_url
+        except Exception as e:
+            Common.logger(log_type).error("get_url异常:{}\n", e)
+
+    # Fetch official-account article info and write it to the article list
+    @classmethod
+    def get_gzh_url(cls, log_type, username, userid, head_url):
+        while True:
+            try:
+                url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                headers = {
+                    "accept": "*/*",
+                    "accept-encoding": "gzip, deflate, br",
+                    "accept-language": "zh-CN,zh;q=0.9",
+                    "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                               "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                               "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                    "sec-ch-ua-mobile": "?0",
+                    "sec-ch-ua-platform": '"Windows"',
+                    "sec-fetch-dest": "empty",
+                    "sec-fetch-mode": "cors",
+                    "sec-fetch-site": "same-origin",
+                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                                  " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                    "x-requested-with": "XMLHttpRequest",
+                    'cookie': cls.get_cookie_token(log_type, "cookie"),
+                }
+                params = {
+                    "action": "list_ex",
+                    "begin": str(cls.begin),
+                    "count": "5",
+                    "fakeid": userid,
+                    "type": "9",
+                    "query": "",
+                    "token": cls.get_cookie_token(log_type, "token"),
+                    "lang": "zh_CN",
+                    "f": "json",
+                    "ajax": "1",
+                }
+                urllib3.disable_warnings()
+                r = requests.get(url=url, headers=headers, params=params, verify=False)
+                cls.begin += 5
+                if 'app_msg_list' not in r.json() or len(r.json()['app_msg_list']) == 0:
+                    Common.logger(log_type).warning("get_gzh_url:response:{}", r.text)
+                    break
+                else:
+                    app_msg_list = r.json()['app_msg_list']
+                    for gzh_url in app_msg_list:
+                        # print(gzh_url)
+
+                        # title
+                        if 'title' in gzh_url:
+                            title = gzh_url['title']
+                        else:
+                            title = 0
+
+                        # aid
+                        if 'aid' in gzh_url:
+                            aid = gzh_url['aid']
+                        else:
+                            aid = 0
+
+                        # create_time
+                        if 'create_time' in gzh_url:
+                            create_time = gzh_url['create_time']
+                        else:
+                            create_time = 0
+
+                        # duration
+                        if 'duration' in gzh_url:
+                            duration = gzh_url['duration']
+                        else:
+                            duration = 0
+
+                        # cover_url
+                        if 'cover' in gzh_url:
+                            cover_url = gzh_url['cover']
+                        else:
+                            cover_url = 0
+
+                        # gzh_url
+                        if 'link' in gzh_url:
+                            gzh_url = gzh_url['link']
+                        else:
+                            gzh_url = 0
+
+                        play_cnt = 0
+                        like_cnt = 0
+                        video_url = cls.get_url(log_type, gzh_url)
+
+                        Common.logger(log_type).info("title:{}", title)
+                        Common.logger(log_type).info("aid:{}", aid)
+                        Common.logger(log_type).info("create_time:{}", create_time)
+                        Common.logger(log_type).info("duration:{}", duration)
+                        Common.logger(log_type).info("cover_url:{}", cover_url)
+                        Common.logger(log_type).info("gzh_url:{}", gzh_url)
+
+                        # Skip invalid articles
+                        if gzh_url == 0:
+                            Common.logger(log_type).info("无效文章\n")
+                        # Duration check
+                        elif int(duration) < 60:
+                            Common.logger(log_type).info("时长:{}<60秒\n", duration)
+                        # Dedup against the already-downloaded sheet
+                        elif str(aid) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in y]:
+                            Common.logger(log_type).info("文章已下载\n")
+                        # Dedup against the article sheet
+                        elif str(aid) in [x for y in Feishu.get_values_batch(log_type, "gzh", "P6GKb3") for x in y]:
+                            Common.logger(log_type).info("文章已存在\n")
+                        else:
+                            # Record the current article ID as crawled
+                            cls.gzh_count.append(aid)
+                            # Insert a row into the official-account article sheet
+                            upload_time = time.time()
+                            Feishu.insert_columns(log_type, 'gzh', 'P6GKb3', 'ROWS', 1, 2)
+                            # Write the crawled article into the Feishu sheet
+                            values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                                       '公众号',
+                                       title,
+                                       str(aid),
+                                       play_cnt,
+                                       like_cnt,
+                                       duration,
+                                       "宽*高",
+                                       time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(create_time)),
+                                       username,
+                                       userid,
+                                       head_url,
+                                       cover_url,
+                                       gzh_url,
+                                       video_url]]
+                            time.sleep(1)
+                            Feishu.update_values(log_type, 'gzh', 'P6GKb3', 'F2:W2', values)
+                            Common.logger(log_type).info("文章写入文档成功\n")
+
+                            if len(cls.gzh_count) >= 10:
+                                Common.logger(log_type).info("当前用户已抓取:{}条数据\n", len(cls.gzh_count))
+                                cls.gzh_count = []
+                                return
+            except Exception as e:
+                Common.logger(log_type).error("get_gzh_url异常:{}\n", e)
+
+    # Fetch article info for all users
+    @classmethod
+    def get_all_gzh(cls, log_type):
+        try:
+            user_sheet = Feishu.get_values_batch(log_type, 'gzh', 'pxHL2C')
+            for i in range(3, len(user_sheet)):
+                username = user_sheet[i][0]
+                userid = user_sheet[i][2]
+                head_url = user_sheet[i][3]
+                Common.logger(log_type).info("获取 {} 公众号文章\n", username)
+                cls.get_gzh_url(log_type, username, userid, head_url)
+        except Exception as e:
+            Common.logger(log_type).error("get_all_gzh异常:{}\n", e)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, env):
+        try:
+            gzh_sheet = Feishu.get_values_batch(log_type, 'gzh', 'P6GKb3')
+            for i in range(1, len(gzh_sheet)-28):
+                download_title = gzh_sheet[i][7]
+                download_vid = gzh_sheet[i][8]
+                download_play_cnt = gzh_sheet[i][9]
+                download_like_cnt = gzh_sheet[i][10]
+                download_duration = gzh_sheet[i][11]
+                download_send_time = gzh_sheet[i][13]
+                download_username = gzh_sheet[i][14]
+                download_userid = gzh_sheet[i][15]
+                download_head_url = gzh_sheet[i][16]
+                download_cover_url = gzh_sheet[i][17]
+                download_video_url = gzh_sheet[i][19]
+                download_video_comment_cnt = 0
+                download_video_share_cnt = 0
+
+                Common.logger(log_type).info("download_title:{}", download_title)
+                Common.logger(log_type).info("download_send_time:{}", download_send_time)
+                Common.logger(log_type).info("download_username:{}", download_username)
+                Common.logger(log_type).info("download_video_url:{}", download_video_url)
+                # Common.logger(log_type).info("download_vid:{}", download_vid)
+                # Common.logger(log_type).info("download_play_cnt:{}", download_play_cnt)
+                # Common.logger(log_type).info("download_like_cnt:{}", download_like_cnt)
+                # Common.logger(log_type).info("download_duration:{}", download_duration)
+                # Common.logger(log_type).info("download_userid:{}", download_userid)
+                # Common.logger(log_type).info("download_head_url:{}", download_head_url)
+                # Common.logger(log_type).info("download_cover_url:{}", download_cover_url)
+
+                # Empty-row check
+                if download_video_url is None or download_title is None:
+                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i+1, i+1)
+                    Common.logger(log_type).info("空行,删除成功\n")
+                # Already-downloaded check (by video ID)
+                elif str(download_vid) in [x for y in Feishu.get_values_batch(log_type, 'gzh', 'fCs3BT') for x in y]:
+                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i + 1, i + 1)
+                    Common.logger(log_type).info("视频已下载\n")
+                # Already-downloaded check (by title)
+                elif str(download_title) in [x for y in Feishu.get_values_batch(log_type, 'gzh', 'fCs3BT') for x in y]:
+                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i + 1, i + 1)
+                    Common.logger(log_type).info("视频已下载\n")
+                else:
+                    # Download the cover
+                    Common.download_method(log_type=log_type, text="cover",
+                                           d_name=str(download_title), d_url=str(download_cover_url))
+                    # Download the video
+                    Common.download_method(log_type=log_type, text="video",
+                                           d_name=str(download_title), d_url=str(download_video_url))
+                    # Get the video's width and height
+                    video_info = cls.get_video_info_from_local("./videos/" + download_title + "/video.mp4")
+                    download_video_resolution = str(video_info[0]) + "*" + str(video_info[1])
+
+                    # Save video info to "./videos/{download_video_title}/info.txt"
+                    with open("./videos/" + download_title
+                              + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
+                        f_a.write(str(download_vid) + "\n" +
+                                  str(download_title) + "\n" +
+                                  str(int(download_duration)) + "\n" +
+                                  str(download_play_cnt) + "\n" +
+                                  str(download_video_comment_cnt) + "\n" +
+                                  str(download_like_cnt) + "\n" +
+                                  str(download_video_share_cnt) + "\n" +
+                                  str(download_video_resolution) + "\n" +
+                                  str(int(time.mktime(
+                                      time.strptime(download_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
+                                  str(download_username) + "\n" +
+                                  str(download_head_url) + "\n" +
+                                  str(download_video_url) + "\n" +
+                                  str(download_cover_url) + "\n" +
+                                  "benshanzhufu")
+                    Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
+
+                    # Upload the video
+                    Common.logger(log_type).info("开始上传视频:{}".format(download_title))
+                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
+                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                    Common.logger(log_type).info("视频上传完成:{}", download_title)
+
+                    # Save the video ID to the cloud doc
+                    Common.logger(log_type).info("保存视频ID至云文档:{}", download_title)
+                    # Video-ID sheet: insert a row at the top
+                    Feishu.insert_columns(log_type, "gzh", "fCs3BT", "ROWS", 1, 2)
+                    # Video-ID sheet: write the data into the first row
+                    upload_time = int(time.time())
+                    values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                               "公众号",
+                               str(download_title),
+                               str(download_vid),
+                               our_video_link,
+                               download_play_cnt,
+                               download_like_cnt,
+                               download_duration,
+                               str(download_video_resolution),
+                               str(download_send_time),
+                               str(download_username),
+                               str(download_userid),
+                               str(download_head_url),
+                               str(download_cover_url),
+                               str(download_video_url)]]
+                    time.sleep(1)
+                    Feishu.update_values(log_type, "gzh", "fCs3BT", "D2:W2", values)
+
+                    # Delete rows or columns; ROWS or COLUMNS
+                    Feishu.dimension_range(log_type, "gzh", "P6GKb3", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_title)
+                    return
+        except Exception as e:
+            Common.logger(log_type).error("download_publish异常:{}\n", e)
+
+    # Run download/upload in a loop
+    @classmethod
+    def run_download_publish(cls, log_type, env):
+        try:
+            while True:
+                time.sleep(1)
+                gzh_sheet = Feishu.get_values_batch(log_type, 'gzh', 'P6GKb3')
+                if len(gzh_sheet) == 1:
+                    Common.logger(log_type).info("下载/上传完成\n")
+                    break
+                else:
+                    cls.download_publish(log_type, env)
+                    time.sleep(random.randint(5, 10))
+        except Exception as e:
+            Common.logger(log_type).error("run_download_publish异常:{}\n", e)
+
+
+if __name__ == "__main__":
+    # GZH.search_user_by_word("gzh")
+    # GZH.get_all_gzh('gzh')
+    # GZH.download_publish('gzh', 'dev')
+    # print(GZH.get_cookie_token('gzh', 'token'))
+    GZH.get_gzh_url('gzh', '何静同学', 'MzkyODMzODQ2Mg==', 'http://mmbiz.qpic.cn/mmbiz_png/go7km0I9Dg3NTxRdMs8MIC6DricCibEdH3OVnEFLmspaVB67iaLdje4lCHFsdjqdXpelf5EicPwHfLWibHWCg5R5urg/0?wx_fmt=png')
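
get_url above scans the article page line by line for mpvideo.qpic.cn links and unescapes "\x26amp;" back to "&". The same parse can be written as a single regex; an equivalent sketch, not code from this commit:

    import re

    def extract_video_urls(page_text: str) -> list:
        # Capture every url: '...' value pointing at mpvideo.qpic.cn,
        # then undo the \x26amp; escaping, as GZH.get_url does
        pattern = re.compile(r"url: '([^']*mpvideo\.qpic\.cn[^']*)'")
        return [m.replace(r"\x26amp;", "&") for m in pattern.findall(page_text)]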

+ 4 - 4
main/publish.py

@@ -9,7 +9,7 @@ import time
 import oss2
 import requests
 import urllib3
-from crawler_gzh.main.common import Common
+from main.common import Common
 
 proxies = {"http": None, "https": None}
 
@@ -136,7 +136,7 @@ class Publish:
         os.rmdir(local_file)
         Common.logger(log_type).info("remove local file dir = {} success".format(local_file))
 
-    local_file_path = '.\\crawler_gzh\\videos'
+    local_file_path = './videos'
     video_file = 'video'
     image_file = 'image'
     info_file = 'info'
@@ -185,7 +185,7 @@ class Publish:
                     # All files under a single video folder
                     for fi in dir_files:
                         # Full path of each file under the video folder
-                        fi_path = fi_d + '\\' + fi
+                        fi_path = fi_d + '/' + fi
                         Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                         # Read info.txt and assign it to data
                         if cls.info_file in fi:
@@ -212,7 +212,7 @@ class Publish:
                     # Refresh the file list
                     dir_files = os.listdir(fi_d)
                     for fi in dir_files:
-                        fi_path = fi_d + '\\' + fi
+                        fi_path = fi_d + '/' + fi
                         # Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                         # Upload to OSS
                         if cls.video_file in fi:
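
This hunk swaps hard-coded Windows separators ('\\') for '/'. A platform-neutral alternative would be os.path.join, e.g.:

    import os

    def file_path(fi_d: str, fi: str) -> str:
        # Equivalent to fi_d + '/' + fi, but correct on any OS
        return os.path.join(fi_d, fi)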

+ 38 - 0
main/run_gzh.py

@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/17
+import datetime
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.gzh import GZH
+
+
+class Main:
+    @classmethod
+    def main(cls, env):
+        while True:
+            if 21 >= datetime.datetime.now().hour >= 8:
+                # Fetch user and ID info
+                Common.logger('gzh').info("今日公众号抓取任务开始\n")
+                GZH.search_user_by_word('gzh')
+                # Fetch every user's official-account article info
+                Common.logger('gzh').info("获取用户文章信息\n")
+                GZH.get_all_gzh('gzh')
+                # Download / upload
+                Common.logger('gzh').info("下载/上传\n")
+                GZH.run_download_publish('gzh', env)
+                # Clean up old logs
+                Common.del_logs('gzh')
+                # Reset the pagination offset and per-user counter for the next run
+                GZH.begin = 0
+                GZH.gzh_count = []
+                Common.logger('gzh').info("今日公众号抓取任务结束,休眠{}小时\n", 24-datetime.datetime.now().hour)
+                time.sleep(3600 * (24-datetime.datetime.now().hour))
+            else:
+                # Outside the 08:00-21:00 window: sleep briefly instead of busy-waiting
+                time.sleep(60)
+
+
+if __name__ == "__main__":
+    Main.main('prod')
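
Main.main sleeps a whole number of hours until roughly midnight and then loops until the 08:00 window opens. If finer control were wanted, sleeping until the next window start could look like this illustrative helper (not code from this commit):

    import datetime

    def seconds_until(hour: int) -> int:
        # Seconds from now until the next occurrence of hour:00
        now = datetime.datetime.now()
        target = now.replace(hour=hour, minute=0, second=0, microsecond=0)
        if target <= now:
            target += datetime.timedelta(days=1)
        return int((target - now).total_seconds())

    # e.g. time.sleep(seconds_until(8))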

+ 2 - 2
main/run_gzh_recommend.py

@@ -8,8 +8,8 @@ import sys
 import time
 
 sys.path.append(os.getcwd())
-from crawler_gzh.main.gzh_recommend import Recommend
-from crawler_gzh.main.common import Common
+from main.gzh_recommend import Recommend
+from main.common import Common
 
 
 class Main:

Too many files changed in this diff; some files are not shown.