# common.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/3/30
  4. """
  5. 公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
  6. """
from datetime import date, timedelta
import datetime
import logging
import os
import time
import requests
import urllib3


class Common:
    # Current time, captured once at class creation, e.g. <class 'datetime.datetime'> 2022-04-14 20:13:51.244472
    now = datetime.datetime.now()
    # Yesterday, e.g. <class 'str'> 2022-04-13
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Today, e.g. <class 'datetime.date'> 2022-04-14
    today = date.today()
    # Tomorrow, e.g. <class 'str'> 2022-04-15
    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")

    @staticmethod
    def crawler_log():
        """
        Create the log directory/file if needed and return the crawler logger.
        """
        # Log directory
        log_path = os.path.join(os.getcwd(), "logs")
        if not os.path.isdir(log_path):
            os.makedirs(log_path)
        # Log format
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        date_format = "%Y-%m-%d %p %H:%M:%S"
        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + ".log"
        # Initialize logging. basicConfig is a no-op after the first call, so a
        # long-running process keeps writing to the file named for its start date.
        logging.basicConfig(filename=os.path.join(log_path, log_name), level=logging.INFO,
                            format=log_format, datefmt=date_format)
        crawler_logger = logging.getLogger("crawler-log")
        return crawler_logger

    @classmethod
    def del_logs(cls):
        """
        Remove redundant log files, keeping only the 7 most recent.
        """
        log_dir = "./logs/"
        # File names are dates, so lexicographic order is chronological
        all_files = sorted(os.listdir(log_dir))
        all_logs = [f for f in all_files if os.path.splitext(f)[-1] == ".log"]
        if len(all_logs) > 7:
            for file in all_logs[:len(all_logs) - 7]:
                os.remove(log_dir + file)
        cls.crawler_log().info("Removed redundant log files")

    @classmethod
    def download_method(cls, text, d_name, d_url):
        """
        Download a cover image (text == "cover") or a video (text == "video").
        :param text: what to download, "cover" or "video"
        :param d_name: video title, used as the directory name
        :param d_url: URL of the cover image or the video stream
        Files are saved under "./videos/{d_name}/".
        """
        # First create a directory to hold everything related to this video
        video_dir = "./videos/" + d_name + "/"
        if not os.path.exists(video_dir):
            os.makedirs(video_dir)
        # Download the video
        if text == "video":
            video_url = d_url
            video_name = "video.mp4"
            # verify=False skips TLS verification, so silence the warning;
            # the timeout (an assumption, 30s) keeps the crawler from hanging
            urllib3.disable_warnings()
            response = requests.get(video_url, stream=True, verify=False, timeout=30)
            try:
                with open(video_dir + video_name, "wb") as f:
                    for chunk in response.iter_content(chunk_size=10240):
                        f.write(chunk)
                cls.crawler_log().info("==========Video download finished==========")
            except Exception as e:
                cls.crawler_log().info("Video download failed: {}".format(e))
        # Download the cover image
        elif text == "cover":
            cover_url = d_url
            cover_name = "image.jpg"
            urllib3.disable_warnings()
            response = requests.get(cover_url, verify=False, timeout=30)
            try:
                with open(video_dir + cover_name, "wb") as f:
                    f.write(response.content)
                cls.crawler_log().info("==========Cover download finished==========")
            except Exception as e:
                cls.crawler_log().info("Cover download failed: {}".format(e))
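
    # Usage sketch (the URLs below are placeholders, not real assets; run from
    # the project root so "./videos/{d_name}/" can be created):
    #   Common.download_method("video", "demo_video", "https://example.com/v.mp4")
    #   Common.download_method("cover", "demo_video", "https://example.com/c.jpg")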

    @staticmethod
    def read_txt(t_name):
        """
        Read a txt file from ./txt/.
        :param t_name: file name
        :return: list of lines in the file
        """
        with open("./txt/" + t_name, "r", encoding="utf8") as f:
            return f.readlines()

    @classmethod
    def kuaishou_download_count(cls):
        """
        Count all videos downloaded from the Kuaishou channel.
        """
        videoid_path = "./txt/kuaishou_videoid.txt"
        # Each downloaded video contributes one line to the id file
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        cls.crawler_log().info("Total videos downloaded: {}\n".format(count))

    @classmethod
    def weishi_download_count(cls):
        """
        Count all videos downloaded from the Weishi channel.
        """
        videoid_path = "./txt/weishi_videoid.txt"
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        cls.crawler_log().info("Total videos downloaded: {}\n".format(count))

    @classmethod
    def kuaishou_today_download_count(cls):
        """
        Count videos downloaded from the Kuaishou channel today.
        :return: number of lines in today's id file
        """
        videoid_path = "./txt/" + str(cls.today) + "_kuaishou_videoid.txt"
        # Opening in append mode creates the file if it does not exist yet
        with open(videoid_path, "a") as f:
            f.write("")
        with open(videoid_path, "rb") as f:
            count = len(f.readlines())
        return count

    @classmethod
    def del_yesterday_kuaishou_videoid_txt(cls):
        """
        Delete yesterday's Kuaishou download-count txt file.
        :return:
        """
        yesterday_kuaishou_videoid_txt_dir = "./txt/"
        all_files = sorted(os.listdir(yesterday_kuaishou_videoid_txt_dir))
        for file in all_files:
            name = os.path.splitext(file)[0]
            if name == cls.yesterday + "_kuaishou_videoid":
                os.remove(yesterday_kuaishou_videoid_txt_dir + file)
        cls.crawler_log().info("Deleted yesterday's Kuaishou download-count file")


if __name__ == "__main__":
    common = Common()
    common.del_yesterday_kuaishou_videoid_txt()
    print(common.kuaishou_today_download_count())
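    # Additional usage sketch: crawler_log/del_logs only touch ./logs/, which
    # they create if missing, so these two calls are safe to run as-is. The
    # commented calls assume placeholder assets (sample.txt, the demo URL)
    # that are not part of this repo.
    logger = common.crawler_log()
    logger.info("demo run finished")
    common.del_logs()
    # common.download_method("video", "demo_video", "https://example.com/v.mp4")
    # print(common.read_txt("sample.txt"))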