download_videos_task.py

import traceback
import datetime
import os
import oss2
import multiprocessing
from threading import Timer
from utils import data_check, get_feature_data
from config import set_config
from log import Log
import ODPSQueryUtil
from ReadXlsxFile import getVideoInfoInXlxs

config_ = set_config()
log_ = Log()
features = ['videoid', 'title', 'video_path']
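
# Assumed shape of config_.OSS_CONFIG, inferred from the keys read in
# download_video_from_oss below; the real values come from config.set_config():
# OSS_CONFIG = {
#     'accessKeyId': '<RAM user AccessKey id>',
#     'accessKeySecret': '<RAM user AccessKey secret>',
#     'endpoint': '<OSS endpoint for the bucket region, Hangzhou in the example below>',
# }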

def download_video_from_oss(video_id, video_path, download_folder):
    """Download a video from OSS to the local disk."""
    try:
        pid = os.getpid() % 2
        print(f"{video_id} download start ...")
        # Shard the output folder by worker process, e.g. videos_0 / videos_1.
        download_folder = f"{download_folder}_{pid}"
        os.makedirs(download_folder, exist_ok=True)
        # Each video gets its own subfolder named after its videoid.
        video_local_dir = os.path.join(download_folder, str(video_id))
        os.makedirs(video_local_dir, exist_ok=True)
        video_filename = video_path.split('/')[-1]
        video_local_path = os.path.join(video_local_dir, video_filename)
        # An Aliyun account AccessKey grants access to every API and is therefore high
        # risk. It is strongly recommended to create a RAM user in the RAM console and
        # use it for API access and day-to-day operations.
        # auth = oss2.Auth(access_key_id=config_.ODPS_CONFIG['ACCESSID'], access_key_secret=config_.ODPS_CONFIG['ACCESSKEY'])
        auth = oss2.Auth(access_key_id=config_.OSS_CONFIG['accessKeyId'],
                         access_key_secret=config_.OSS_CONFIG['accessKeySecret'])
        # The endpoint uses Hangzhou as an example; fill in the actual value for
        # other regions.
        bucket = oss2.Bucket(
            auth, endpoint=config_.OSS_CONFIG['endpoint'], bucket_name='art-pubbucket')
        # Download the OSS object to a local file.
        # <yourObjectName> is the full object path including the file extension but
        # excluding the bucket name, e.g. abc/efg/123.jpg.
        # <yourLocalFile> is the local path plus file name including the extension,
        # e.g. /users/local/myfile.txt.
        bucket.get_object_to_file(video_path, video_local_path)
        # For an m3u8 playlist, every .ts segment it references must be downloaded too.
        if video_filename.split('.')[-1] == 'm3u8':
            root_path = '/'.join(video_path.split('/')[:-1])
            with open(video_local_path, 'r') as rf:
                lines = rf.readlines()
            for line in lines:
                line = line.strip()
                print(line)
                if line.endswith('.ts'):
                    # OSS object keys always use '/', so join with a plain string
                    # instead of os.path.join.
                    ts_path = f"{root_path}/{line}"
                    ts_local_path = os.path.join(video_local_dir, line)
                    bucket.get_object_to_file(ts_path, ts_local_path)
        print(f"{video_id} download end!")
    except Exception:
        print(f"{video_id} download fail!")
        print(traceback.format_exc())

def download_videos(project, table, dt):
    """Download every video listed in partition dt of the feature table."""
    # Fetch the feature data (videoid / title / video_path).
    feature_df = get_feature_data(
        project=project, table=table, dt=dt, features=features)
    download_folder = 'videos'
    video_id_list = feature_df['videoid'].to_list()
    pool = multiprocessing.Pool(processes=6)
    for video_id in video_id_list:
        video_path = feature_df[feature_df['videoid'] == video_id]['video_path'].values[0].strip()
        # Remove any stray whitespace inside the object key.
        video_path = video_path.replace(' ', '')
        print(video_id, video_path)
        pool.apply_async(
            func=download_video_from_oss,
            args=(video_id, video_path, download_folder)
        )
    pool.close()
    pool.join()
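
# Assumed shape of config_.DAILY_VIDEO, inferred from the keys read in timer_check
# below; it points at the ODPS project and table that hold the daily video list:
# DAILY_VIDEO = {'project': '<odps project name>', 'table': '<video feature table name>'}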

def timer_check():
    try:
        project = config_.DAILY_VIDEO['project']
        table = config_.DAILY_VIDEO['table']
        # Use yesterday's partition.
        now_date = datetime.datetime.today()
        print(f"now_date: {datetime.datetime.strftime(now_date, '%Y%m%d')}")
        dt = datetime.datetime.strftime(
            now_date - datetime.timedelta(days=1), '%Y%m%d')
        # Check whether the data is ready.
        data_count = data_check(project=project, table=table, dt=dt)
        if data_count > 0:
            print(f'videos count = {data_count}')
            # Data is ready, download the videos.
            download_videos(project=project, table=table, dt=dt)
            print("videos download end!")
        else:
            # Data is not ready yet, check again in one minute.
            Timer(60, timer_check).start()
    except Exception as e:
        print(f"video download failed, exception: {e}, traceback: {traceback.format_exc()}")

if __name__ == '__main__':
    # Scheduled daily flow, driven by the ODPS partition:
    # timer_check()

    # One-off flow: read the video list from a local xlsx file instead.
    feature_df = getVideoInfoInXlxs('past_videos.xlsx')
    download_folder = 'videos'
    video_id_list = feature_df['videoid'].to_list()
    pool = multiprocessing.Pool(processes=6)
    for video_id in video_id_list:
        video_path = feature_df[feature_df['videoid'] == video_id]['video_path'].values[0].strip()
        video_path = video_path.replace(' ', '')
        print(video_id, video_path)
        pool.apply_async(
            func=download_video_from_oss,
            args=(video_id, video_path, download_folder)
        )
    pool.close()
    pool.join()