|
@@ -6,6 +6,7 @@
|
|
|
"""
|
|
|
from datetime import date, timedelta
|
|
|
from loguru import logger
|
|
|
+from hashlib import md5
|
|
|
import datetime
|
|
|
import os
|
|
|
import time
|
|
@@ -87,7 +88,9 @@ class Common:
|
|
|
# 保存视频信息至 "./videos/{video_dict['video_title}/info.txt"
|
|
|
@classmethod
|
|
|
def save_video_info(cls, log_type, crawler, video_dict):
|
|
|
- with open(f"./{crawler}/videos/{video_dict['video_title']}/info.txt",
|
|
|
+ md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
|
|
|
+
|
|
|
+ with open(f"./{crawler}/videos/{md_title}/info.txt",
|
|
|
"a", encoding="UTF-8") as f_a:
|
|
|
f_a.write(str(video_dict['video_id']) + "\n" +
|
|
|
str(video_dict['video_title']) + "\n" +
|
|
@@ -118,7 +121,8 @@ class Common:
|
|
|
if not os.path.exists(videos_dir):
|
|
|
os.mkdir(videos_dir)
|
|
|
# 首先创建一个保存该视频相关信息的文件夹
|
|
|
- video_path = f"./{crawler}/videos/{title}/"
|
|
|
+ md_title = md5(title.encode('utf8')).hexdigest()
|
|
|
+ video_path = f"./{crawler}/videos/{md_title}/"
|
|
|
if not os.path.exists(video_path):
|
|
|
os.mkdir(video_path)
|
|
|
|
|
@@ -170,7 +174,7 @@ class Common:
|
|
|
# 下载封面
|
|
|
urllib3.disable_warnings()
|
|
|
# response = requests.get(cover_url, proxies=cls.tunnel_proxies(), verify=False)
|
|
|
- response = requests.get(cover_url, proxies=proxies, verify=False)
|
|
|
+ response = requests.get(cover_url, verify=False)
|
|
|
try:
|
|
|
with open(video_path + cover_name, "wb") as f:
|
|
|
f.write(response.content)
|
|
@@ -185,7 +189,7 @@ class Common:
|
|
|
# 视频名
|
|
|
video_name = "video.mp4"
|
|
|
try:
|
|
|
- download_cmd = f'yt-dlp -f "bv[height=720][ext=mp4]+ba[ext=m4a]" --merge-output-format mp4 {video_url}-U -o {video_path}{video_name}'
|
|
|
+                download_cmd = f'yt-dlp -f "bv[height=720][ext=mp4]+ba[ext=m4a]" --merge-output-format mp4 "{video_url}" -U -o {video_path}{video_name}'
|
|
|
Common.logger(log_type, crawler).info(f"download_cmd:{download_cmd}")
|
|
|
os.system(download_cmd)
|
|
|
# move_cmd = f"mv {video_name} {video_path}"
|