common.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/3/30
"""
Common helpers: create logs / delete logs / download method / read files / count downloads
"""
from datetime import date, timedelta
import datetime
import logging
import os
import time
import requests
import urllib3

# Do not route requests through any system proxy
proxies = {"http": None, "https": None}


class Common:
    # Current time, e.g. <class 'datetime.datetime'> 2022-04-14 20:13:51.244472
    now = datetime.datetime.now()
    # Yesterday, e.g. <class 'str'> "2022-04-13"
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Today, e.g. <class 'datetime.date'> 2022-04-14
    today = date.today()
    # Tomorrow, e.g. <class 'str'> "2022-04-15"
    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
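    # Note: these class attributes are evaluated once, when the module is first
    # imported, so `now` / `today` / `yesterday` / `tomorrow` keep the values from
    # import time rather than from the moment a method is called.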

    @staticmethod
    def crawler_log():
        """
        Create and return the crawler logger.
        """
        # Log directory
        log_dir = r"./logs/"
        log_path = os.getcwd() + os.sep + log_dir
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        # Logging parameters: one file per day, named YYYY-MM-DD.log
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        date_format = "%Y-%m-%d %p %H:%M:%S"
        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + ".log"
        # Initialize logging
        logging.basicConfig(filename=log_path + log_name, level=logging.INFO,
                            format=log_format, datefmt=date_format)
        crawler_logger = logging.getLogger("crawler-log")
        return crawler_logger
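
    # Note: logging.basicConfig() configures the root logger only on its first
    # call in a process, so later calls here reuse the existing handler (and the
    # log file named after that first day) rather than reconfiguring it. Each
    # call returns the same "crawler-log" logger instance.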

    @classmethod
    def del_logs(cls):
        """
        Remove redundant log files, keeping only the 7 most recent.
        """
        log_dir = r"./logs/"
        all_files = sorted(os.listdir(log_dir))
        all_logs = [f for f in all_files if os.path.splitext(f)[-1] == ".log"]
        if len(all_logs) > 7:
            for file in all_logs[:len(all_logs) - 7]:
                os.remove(log_dir + file)
        cls.crawler_log().info("Removed redundant logs")
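
    # Note: because the log files are named YYYY-MM-DD.log, the lexicographic
    # sort above is also chronological, so the slice removes the oldest files.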

    @classmethod
    def download_method(cls, text, d_name, d_url):
        """
        Download the cover when text == "cover"; download the video when text == "video".
        :param d_name: title of the video, used as the folder name
        :param d_url: cover URL or video playback URL
        Files are saved under "./videos/{d_name}/".
        """
        # First create a folder that holds everything related to this video
        video_dir = "./videos/" + d_name + "/"
        if not os.path.exists(video_dir):
            os.mkdir(video_dir)
        # Download the video
        if text == "video":
            # URL of the video to download
            video_url = d_url
            # Video file name
            video_name = "video.mp4"
            urllib3.disable_warnings()
            response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
            try:
                with open(video_dir + video_name, "wb") as f:
                    for chunk in response.iter_content(chunk_size=10240):
                        f.write(chunk)
                cls.crawler_log().info("==========Video download finished==========")
            except Exception as e:
                cls.crawler_log().info("Video download failed: {}".format(e))
        # Download the cover
        elif text == "cover":
            # URL of the cover to download
            cover_url = d_url
            # Cover file name
            cover_name = "image.jpg"
            urllib3.disable_warnings()
            response = requests.get(cover_url, proxies=proxies, verify=False)
            try:
                with open(video_dir + cover_name, "wb") as f:
                    f.write(response.content)
                cls.crawler_log().info("==========Cover download finished==========")
            except Exception as e:
                cls.crawler_log().info("Cover download failed: {}".format(e))
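
    # Usage sketch with hypothetical arguments (the real titles and URLs come
    # from the crawler modules that call this helper):
    #     Common.download_method("video", "some_title", "https://example.com/v.mp4")
    #     Common.download_method("cover", "some_title", "https://example.com/c.jpg")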

    @staticmethod
    def read_txt(t_name):
        """
        Read a txt file under ./txt/.
        :param t_name: file name
        :return: list of lines
        """
        with open(r"./txt/" + t_name, "r", encoding="UTF-8") as f:
            return f.readlines()
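
    # For example (assuming the file exists), read_txt("kuaishou_videoid.txt")
    # returns something like ["id_1\n", "id_2\n", ...], newlines included.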

    @classmethod
    def kuaishou_download_count(cls):
        """
        Log the total number of videos downloaded from Kuaishou.
        """
        videoid_path = r"./txt/kuaishou_videoid.txt"
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        cls.crawler_log().info("Total videos downloaded: {}\n".format(count))

    @classmethod
    def weishi_download_count(cls):
        """
        Log the total number of videos downloaded from Weishi.
        """
        videoid_path = r"./txt/weishi_videoid.txt"
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        cls.crawler_log().info("Total videos downloaded: {}\n".format(count))

    @classmethod
    def kuaishou_today_download_count(cls):
        """
        Count the videos downloaded from Kuaishou today.
        :return: number of lines in today's video-id file
        """
        # Opening in "a" mode creates the file if it does not exist yet
        videoid_path = r"./txt/" + str(cls.today) + "_kuaishou_videoid.txt"
        with open(videoid_path, "a") as f:
            f.write("")
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        return count
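
    # A possible call pattern (hypothetical caller, not in this file): a crawler
    # could stop once a daily quota is reached, e.g.
    #     if Common.kuaishou_today_download_count() >= daily_limit:
    #         return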

    @classmethod
    def del_yesterday_kuaishou_videoid_txt(cls):
        """
        Delete yesterday's Kuaishou video-id txt file.
        """
        yesterday_kuaishou_videoid_txt_dir = r"./txt/"
        all_files = sorted(os.listdir(yesterday_kuaishou_videoid_txt_dir))
        for file in all_files:
            name = os.path.splitext(file)[0]
            if name == cls.yesterday + "_kuaishou_videoid":
                os.remove(yesterday_kuaishou_videoid_txt_dir + file)
        Common.crawler_log().info("Deleted yesterday's Kuaishou download-count file")


if __name__ == "__main__":
    common = Common()
    common.del_yesterday_kuaishou_videoid_txt()
    print(common.kuaishou_today_download_count())
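
# Minimal usage sketch from another module (the flat import path below is an
# assumption; adjust it to the project's actual package layout):
#     from common import Common
#     Common.crawler_log().info("crawler started")
#     Common.del_logs()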