# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/4/25
"""
Common helpers: create logs / delete logs / download method / read files / count downloads
"""
from datetime import date, timedelta
import datetime
import logging
import os
import time

import requests
import urllib3

# Force direct connections: bypass any system-level HTTP(S) proxy
proxies = {"http": None, "https": None}


class Common:
    # Current time, e.g. <class 'datetime.datetime'> 2022-04-14 20:13:51.244472
    now = datetime.datetime.now()
    # Yesterday as a string, e.g. <class 'str'> 2022-04-13
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Today, e.g. <class 'datetime.date'> 2022-04-14
    today = date.today()
    # Tomorrow as a string, e.g. <class 'str'> 2022-04-15
    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")

    @staticmethod
    def crawler_log():
        """
        Create the daily log file and return a logger.
        """
        # Log directory
        log_dir = "./logs/"
        log_path = os.getcwd() + os.sep + log_dir
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        # Log settings
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        date_format = "%Y-%m-%d %p %H:%M:%S"
        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + ".log"
        # Initialize logging (basicConfig is a no-op once the root logger already has handlers)
        logging.basicConfig(filename=log_path + log_name, level=logging.INFO,
                            format=log_format, datefmt=date_format)
        crawler_logger = logging.getLogger("crawler-log")
        return crawler_logger

    @classmethod
    def del_logs(cls):
        """
        Remove stale log files.
        :return: None; keeps the 7 most recent logs
        """
        log_dir = "./logs/"
        all_files = sorted(os.listdir(log_dir))
        all_logs = []
        for log in all_files:
            name = os.path.splitext(log)[-1]
            if name == ".log":
                all_logs.append(log)
        # Log names are dated, so lexical order is chronological; drop all but the newest 7
        if len(all_logs) > 7:
            for file in all_logs[:len(all_logs) - 7]:
                os.remove(log_dir + file)
        cls.crawler_log().info("Stale logs cleaned up successfully")

    @classmethod
    def download_method(cls, text, d_name, d_url):
        """
        Download a cover: text == "cover"; download a video: text == "video"
        Title of the video to download: d_name
        Cover image URL, or video playback URL: d_url
        Save path: "./videos/{d_name}/"
        """
        # First create a folder to hold everything related to this video
        # (makedirs also creates the parent ./videos/ directory on first run)
        video_dir = "./videos/" + d_name + "/"
        if not os.path.exists(video_dir):
            os.makedirs(video_dir)
        # Download the video
        if text == "video":
            # URL of the video to download
            video_url = d_url
            # Video file name
            video_name = "video.mp4"
            # Download the video; verify=False is used here, so silence the InsecureRequestWarning
            urllib3.disable_warnings()
            response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
            try:
                with open(video_dir + video_name, "wb") as f:
                    for chunk in response.iter_content(chunk_size=10240):
                        f.write(chunk)
                cls.crawler_log().info("==========Video download complete==========")
            except Exception as e:
                cls.crawler_log().error("Video download failed: {}".format(e))
        # Download the cover
        elif text == "cover":
            # URL of the cover to download
            cover_url = d_url
            # Cover file name
            cover_name = "image.jpg"
            # Download the cover
            urllib3.disable_warnings()
            response = requests.get(cover_url, proxies=proxies, verify=False)
            try:
                with open(video_dir + cover_name, "wb") as f:
                    f.write(response.content)
                cls.crawler_log().info("==========Cover download complete==========")
            except Exception as e:
                cls.crawler_log().error("Cover download failed: {}".format(e))

    @staticmethod
    def read_txt(t_name):
        """
        Read a txt file.
        :param t_name: file name
        :return: file contents as a list of lines
        """
        with open(r"./txt/" + t_name, "r", encoding="UTF-8") as f:
            return f.readlines()

    @classmethod
    def benshanzhufu_download_count(cls):
        """
        Count downloaded videos recorded in the benshanzhufu video-id file.
        """
        videoid_path = r"./txt/benshanzhufu_videoid.txt"
        # One video id per line; the context manager closes the file properly
        with open(videoid_path, "rb") as f:
            count = sum(1 for _ in f)
        cls.crawler_log().info("Total videos downloaded: {}\n".format(count))


if __name__ == "__main__":
    common = Common()
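    # A minimal self-test sketch (not in the original file): it exercises the
    # two helpers that need no external data. The download call stays commented
    # out because its title and URL are placeholders, not real crawl targets,
    # and benshanzhufu_download_count assumes ./txt/benshanzhufu_videoid.txt exists.
    Common.crawler_log().info("common.py self-test")
    Common.del_logs()
    # Common.download_method("video", "demo_title", "https://example.com/video.mp4")
    # Common.benshanzhufu_download_count()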