publish.py

"""
Upload videos to Aliyun OSS.
Upload videos to the admin backend.
"""
import json
import os
import random
import time

import oss2
import requests
import urllib3

from main.common import Common

proxies = {"http": None, "https": None}


class Publish:
    @classmethod
    def publish_video_dev(cls, request_data):
        """
        loginUid                    in-app uid (chosen at random)
        appType                     default: 888888
        crawlerSrcId                external (crawled) video ID
        crawlerSrcCode              channel (custom, e.g. KYK)
        crawlerSrcPublishTimestamp  original publish time of the video
        crawlerTaskTimestamp        crawler task creation time (the current time is fine)
        videoPath                   OSS path of the video file
        coverImgPath                OSS path of the video cover image
        title                       title
        totalTime                   video duration
        viewStatus                  validity status of the video, default 1
        versionCode                 version, default 1
        :return:
        """
        Common.logger().info('publish request data: {}'.format(request_data))
        result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
        Common.logger().info('publish result: {}'.format(result))
        if result['code'] != 0:
            Common.logger().error('publish failure msg = {}'.format(result['msg']))
        else:
            Common.logger().info('publish success video_id = {}'.format(request_data['crawlerSrcId']))

    @classmethod
    def publish_video_prod(cls, request_data):
        """
        loginUid                    in-app uid (chosen at random)
        appType                     default: 888888
        crawlerSrcId                external (crawled) video ID
        crawlerSrcCode              channel (custom, e.g. KYK)
        crawlerSrcPublishTimestamp  original publish time of the video
        crawlerTaskTimestamp        crawler task creation time (the current time is fine)
        videoPath                   OSS path of the video file
        coverImgPath                OSS path of the video cover image
        title                       title
        totalTime                   video duration
        viewStatus                  validity status of the video, default 1
        versionCode                 version, default 1
        :return:
        """
        result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
        Common.logger().info('publish result: {}'.format(result))
        if result['code'] != 0:
            Common.logger().error('publish failure msg = {}'.format(result['msg']))
        else:
            Common.logger().info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
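
    # A minimal sketch of a complete request_data payload, assembled from the field
    # descriptions above; the concrete values below are illustrative only.
    #
    #     request_data = {
    #         'loginUid': '20631228',
    #         'appType': '888888',
    #         'crawlerSrcId': '4521370757',
    #         'crawlerSrcCode': 'XIAONIANGAO_XCX',
    #         'crawlerSrcPublishTimestamp': '1645400000000',
    #         'crawlerTaskTimestamp': '1645751763000',
    #         'videoPath': 'longvideo/crawler_local/video/prod/20220225/4521370757',
    #         'coverImgPath': 'longvideo/crawler_local/image/prod/20220225/4521370757',
    #         'title': 'example title',
    #         'totalTime': '60',
    #         'viewStatus': '1',
    #         'versionCode': '1',
    #     }
    #     Publish.publish_video_prod(request_data)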

    @classmethod
    def request_post(cls, request_url, request_data):
        """
        POST request to an HTTP API.
        :param request_url: API URL
        :param request_data: request parameters
        :return: res_data parsed from the JSON response
        """
        urllib3.disable_warnings()
        response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
        if response.status_code == 200:
            res_data = json.loads(response.text)
            return res_data
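
    # Note: request_post returns None when the HTTP status is not 200, and the callers
    # index into the result directly, so a failed HTTP call will raise. Judging by the
    # callers, the publish endpoint is assumed to reply with JSON of the form
    # {"code": 0, "msg": "..."}; this shape is inferred from the code, not from API docs.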

    # The code below demonstrates basic file upload, download, listing and deletion usage.
    # First initialize the AccessKeyId, AccessKeySecret, Endpoint and related settings,
    # either from environment variables or by replacing placeholders such as
    # "<your AccessKeyId>" with the real values.
    #
    # Taking the Hangzhou region as an example, the endpoint can be:
    #   http://oss-cn-hangzhou.aliyuncs.com
    #   https://oss-cn-hangzhou.aliyuncs.com
    # for access over HTTP and HTTPS respectively.
    access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
    access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
    bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
    # endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
    endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')

    # Make sure the parameters above are filled in correctly.
    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
        assert '<' not in param, 'Please set parameter: ' + param

    # Create the Bucket object; all object-related operations go through it.
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

    """
    Processing flow:
    1. On a schedule (once a day at 1 a.m.), walk the contents of the files directory.
       Layout: files -> per-video folder -> video file + cover image + info file
    2. Upload the video file and the cover image to OSS
       - video OSS path:  longvideo/crawler_local/video/prod/<file name>
       - cover OSS path:  longvideo/crawler_local/image/prod/<file name>
    3. Publish the video
       - read the info file and call the publish API
    """
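    # For reference, a sketch of the on-disk layout that upload_and_publish below expects,
    # reconstructed from the code that consumes it (folder and file names are illustrative;
    # only the substrings 'video', 'image' and 'info' in the file names matter):
    #
    #   ./videos/
    #       4521370757/
    #           video.mp4   # uploaded to the video OSS path
    #           image.jpg   # uploaded to the cover-image OSS path
    #           info.txt    # line 1: crawlerSrcId, line 2: title, line 3: totalTime,
    #                       # line 9: crawlerSrcPublishTimestamp (other lines ignored)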

    # Path templates: {env} / {date, e.g. 20220225} / {file name}
    oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
    oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'

    @classmethod
    def put_file(cls, oss_file, local_file):
        cls.bucket.put_object_from_file(oss_file, local_file)
        Common.logger().info("put oss file = {}, local file = {} success".format(oss_file, local_file))

    # Remove a local file
    @classmethod
    def remove_local_file(cls, local_file):
        os.remove(local_file)
        Common.logger().info("remove local file = {} success".format(local_file))

    # Remove a local directory
    @classmethod
    def remove_local_file_dir(cls, local_file):
        os.rmdir(local_file)
        Common.logger().info("remove local file dir = {} success".format(local_file))

    local_file_path = './videos'
    video_file = 'video'
    image_file = 'image'
    info_file = 'info'
    uids_dev_up = [6267140]
    uids_dev_play = [6267141]
    uids_prod_up = [20631208, 20631209, 20631210, 20631211, 20631212,
                    20631213, 20631214, 20631215, 20631216, 20631217]
    uids_prod_play = [20631228, 20631229, 20631230, 20631231, 20631232,
                      20631233, 20631234, 20631235, 20631236, 20631237]

    @classmethod
    def upload_and_publish(cls, env, job):
        """
        Upload videos to OSS and publish them.
        :param env: "dev" for the test environment, "prod" for production
        :param job: "up" for the rising list, "play" for the play-count list
        """
        Common.logger().info("upload_and_publish starting...")
        today = time.strftime("%Y%m%d", time.localtime())
        # All video folders under the videos directory
        files = os.listdir(cls.local_file_path)
        for f in files:
            try:
                # A single video folder
                fi_d = os.path.join(cls.local_file_path, f)
                # Make sure it really is a folder
                if os.path.isdir(fi_d):
                    Common.logger().info('dir = {}'.format(fi_d))
                    # List all files inside the video folder
                    dir_files = os.listdir(fi_d)
                    data = {'appType': '888888', 'crawlerSrcCode': 'XIAONIANGAO_XCX', 'viewStatus': '1', 'versionCode': '1'}
                    now_timestamp = int(round(time.time() * 1000))
                    data['crawlerTaskTimestamp'] = str(now_timestamp)
                    if env == "dev" and job == "up":
                        uid = str(random.choice(cls.uids_dev_up))
                    elif env == "dev" and job == "play":
                        uid = str(random.choice(cls.uids_dev_play))
                    elif env == "prod" and job == "up":
                        uid = str(random.choice(cls.uids_prod_up))
                    elif env == "prod" and job == "play":
                        uid = str(random.choice(cls.uids_prod_play))
                    data['loginUid'] = uid
                    # All files inside the single video folder
                    for fi in dir_files:
                        # Full path of each file in the video folder
                        fi_path = fi_d + '/' + fi
                        Common.logger().info('dir fi_path = {}'.format(fi_path))
                        # Read info.txt and copy its fields into data
                        if cls.info_file in fi:
                            info_fp = open(fi_path, "r", encoding="UTF-8")
                            # The writer guarantees the data is well-formed, so no extra validation here
                            for i in range(14):
                                line = info_fp.readline().replace('\n', '')
                                if line and not line.isspace():
                                    Common.logger().info("line = {}".format(line))
                                    if i == 0:
                                        data['crawlerSrcId'] = line
                                    elif i == 1:
                                        data['title'] = line
                                    elif i == 2:
                                        data['totalTime'] = line
                                    elif i == 8:
                                        data['crawlerSrcPublishTimestamp'] = line
                                else:
                                    Common.logger().warning("{} line is None".format(fi_path))
                            info_fp.close()
                            # Remove info.txt
                            cls.remove_local_file(fi_path)
                    # Re-list the folder now that info.txt has been removed
                    dir_files = os.listdir(fi_d)
                    for fi in dir_files:
                        fi_path = fi_d + '/' + fi
                        Common.logger().info('dir fi_path = {}'.format(fi_path))
                        # Upload to OSS
                        if cls.video_file in fi:
                            if env == "dev":
                                oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
                            elif env == "prod":
                                oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
                            Common.logger().info("oss_video_file = {}".format(oss_video_file))
                            cls.put_file(oss_video_file, fi_path)
                            data['videoPath'] = oss_video_file
                            Common.logger().info("videoPath = {}".format(oss_video_file))
                        elif cls.image_file in fi:
                            if env == "dev":
                                oss_image_file = cls.oss_file_path_image.format("dev", today, data['crawlerSrcId'])
                            elif env == "prod":
                                oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
                            Common.logger().info("oss_image_file = {}".format(oss_image_file))
                            cls.put_file(oss_image_file, fi_path)
                            data['coverImgPath'] = oss_image_file
                            Common.logger().info("coverImgPath = {}".format(oss_image_file))
                        # Remove every local file once it has been handled
                        cls.remove_local_file(fi_path)
                    # Publish
                    if env == "dev":
                        cls.publish_video_dev(data)
                    elif env == "prod":
                        cls.publish_video_prod(data)
                    cls.remove_local_file_dir(fi_d)
                else:
                    Common.logger().error('file not a dir = {}'.format(fi_d))
            except Exception as e:
                Common.logger().exception('upload_and_publish error: {}'.format(e))
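

# A minimal usage sketch, assuming this module is driven by an external scheduler
# (e.g. a daily 1 a.m. job, as described in the processing-flow note above); the
# __main__ guard itself is not part of the original module.
if __name__ == '__main__':
    # Upload everything under ./videos and publish to the production environment,
    # crediting the videos to the play-count pool of uids.
    Publish.upload_and_publish(env="prod", job="play")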