# publish.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2023/1/31
  4. """
  5. 站内UID配置 / 环境配置 / 视频上传
  6. """
  7. import json
  8. import os
  9. import random
  10. import shutil
  11. import sys
  12. import time
  13. import oss2
  14. import requests
  15. import urllib3
# Make project-local packages importable when the file is run as a script.
sys.path.append(os.getcwd())
from common.common import Common
# Disable any system HTTP/HTTPS proxy for every request made in this module.
proxies = {"http": None, "https": None}
  19. class Publish:
  20. @classmethod
  21. def publish_video_dev(cls, log_type, crawler, request_data):
  22. """
  23. loginUid 站内uid (随机)
  24. appType 默认:888888
  25. crawlerSrcId 站外视频ID
  26. crawlerSrcCode 渠道(自定义 KYK)
  27. crawlerSrcPublishTimestamp 视频原发布时间
  28. crawlerTaskTimestamp 爬虫创建时间(可以是当前时间)
  29. videoPath 视频oss地址
  30. coverImgPath 视频封面oss地址
  31. title 标题
  32. totalTime 视频时长
  33. viewStatus 视频的有效状态 默认1
  34. versionCode 版本 默认1
  35. :return:
  36. """
  37. Common.logger(log_type, crawler).info('publish request data: {}'.format(request_data))
  38. result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
  39. Common.logger(log_type, crawler).info('publish result: {}'.format(result))
  40. video_id = result["data"]["id"]
  41. Common.logger(log_type, crawler).info('video_id: {}'.format(video_id))
  42. if result['code'] != 0:
  43. Common.logger(log_type, crawler).error('pushlish failure msg = {}'.format(result['msg']))
  44. else:
  45. Common.logger(log_type, crawler).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
  46. return video_id
  47. @classmethod
  48. def publish_video_prod(cls, log_type, crawler, request_data):
  49. """
  50. loginUid 站内uid (随机)
  51. appType 默认:888888
  52. crawlerSrcId 站外视频ID
  53. crawlerSrcCode 渠道(自定义 KYK)
  54. crawlerSrcPublishTimestamp 视频原发布时间
  55. crawlerTaskTimestamp 爬虫创建时间(可以是当前时间)
  56. videoPath 视频oss地址
  57. coverImgPath 视频封面oss地址
  58. title 标题
  59. totalTime 视频时长
  60. viewStatus 视频的有效状态 默认1
  61. versionCode 版本 默认1
  62. :return:
  63. """
  64. Common.logger(log_type, crawler).info(f'publish request data: {request_data}')
  65. result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
  66. Common.logger(log_type, crawler).info(f'publish result: {result}')
  67. video_id = result["data"]["id"]
  68. Common.logger(log_type, crawler).info(f'video_id: {video_id}')
  69. if result['code'] != 0:
  70. Common.logger(log_type, crawler).error('pushlish failure msg = {}'.format(result['msg']))
  71. else:
  72. Common.logger(log_type, crawler).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
  73. return video_id
  74. @classmethod
  75. def request_post(cls, request_url, request_data):
  76. """
  77. post 请求 HTTP接口
  78. :param request_url: 接口URL
  79. :param request_data: 请求参数
  80. :return: res_data json格式
  81. """
  82. urllib3.disable_warnings()
  83. response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
  84. if response.status_code == 200:
  85. res_data = json.loads(response.text)
  86. return res_data
  87. @classmethod
  88. def bucket(cls, oss_endpoint):
  89. """
  90. 创建 bucket
  91. :param oss_endpoint: inner:内网;out:外网;hk:香港
  92. :return: bucket
  93. """
  94. # 以下代码展示了基本的文件上传、下载、罗列、删除用法。
  95. # 首先初始化AccessKeyId、AccessKeySecret、Endpoint等信息。
  96. # 通过环境变量获取,或者把诸如“<你的AccessKeyId>”替换成真实的AccessKeyId等。
  97. #
  98. # 以杭州区域为例,Endpoint可以是:
  99. # http://oss-cn-hangzhou.aliyuncs.com
  100. # https://oss-cn-hangzhou.aliyuncs.com
  101. # 分别以HTTP、HTTPS协议访问。
  102. access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
  103. access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
  104. bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
  105. # OSS 内网
  106. if oss_endpoint == 'inner':
  107. endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
  108. # OSS 外网
  109. elif oss_endpoint == 'out':
  110. endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
  111. elif oss_endpoint == 'hk':
  112. endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-accelerate.aliyuncs.com')
  113. # 默认走外网
  114. else:
  115. endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
  116. # 确认上面的参数都填写正确了
  117. for param in (access_key_id, access_key_secret, bucket_name, endpoint):
  118. assert '<' not in param, '请设置参数:' + param
  119. # 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
  120. bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
  121. return bucket
  122. """
  123. 处理流程:
  124. 1. 定时(每天凌晨1点执行一次)循环files文件下的内容 结构:files -> 视频文件夹 -> 视频文件 + 封面图 + 基本信息
  125. 2. 视频文件和封面上传到oss
  126. - 视频文件oss目录 longvideo/crawler_local/video/prod/文件名
  127. - 视频封面oss目录 longvideo/crawler_local/image/prod/文件名
  128. 3. 发布视频
  129. - 读取 基本信息 调用发布接口
  130. """
  131. # env 日期20220225 文件名
  132. oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
  133. oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'
  134. @classmethod
  135. def put_file(cls, log_type, crawler, oss_endpoint, oss_file, local_file):
  136. # cls.bucket.put_object_from_file(oss_file, local_file)
  137. cls.bucket(oss_endpoint).put_object_from_file(oss_file, local_file)
  138. Common.logger(log_type, crawler).info("put oss file = {}, local file = {} success".format(oss_file, local_file))
  139. # 清除本地文件
  140. @classmethod
  141. def remove_local_file(cls, log_type, crawler, local_file):
  142. os.remove(local_file)
  143. Common.logger(log_type, crawler).info("remove local file = {} success".format(local_file))
  144. # 清除本地文件夹
  145. @classmethod
  146. def remove_local_file_dir(cls, log_type, crawler, local_file):
  147. os.rmdir(local_file)
  148. Common.logger(log_type, crawler).info("remove local file dir = {} success".format(local_file))
  149. # 站内 UID
  150. @classmethod
  151. def uids(cls, crawler, strategy, our_uid, env):
  152. """
  153. 站内 ID
  154. :param crawler: 哪款爬虫
  155. :param env: 什么环境
  156. :param strategy: 榜单类型,也可以是指定的站内 UID
  157. :param our_uid: 上传到指定站内 UID
  158. :return: uid
  159. """
  160. if env == 'dev':
  161. uids_dev = [6267140, 6267141]
  162. return random.choice(uids_dev)
  163. elif crawler == 'kanyikan':
  164. uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
  165. 20631213, 20631214, 20631215, 20631216, 20631217,
  166. 20631223, 20631224, 20631225, 20631226, 20631227]
  167. return random.choice(uids_prod_kanyikan_moment)
  168. elif crawler == 'ggdc' and env == 'prod' and strategy == 'kanyikan_recommend':
  169. uids_ggdc_prod_recommend = [26117661, 26117662, 26117663]
  170. return random.choice(uids_ggdc_prod_recommend)
  171. elif crawler == 'ggdc' and env == 'prod' and strategy == 'follow':
  172. uids_ggdc_prod_follow = [26117661, 26117662, 26117663]
  173. return random.choice(uids_ggdc_prod_follow)
  174. else:
  175. return our_uid
  176. # 爬虫渠道号
  177. @classmethod
  178. def crawlersrccode(cls, crawler):
  179. if crawler == 'youtube':
  180. return 'YOUTUBE'
  181. elif crawler == 'kanyikan':
  182. return 'KANYIKAN'
  183. elif crawler == "kuaishou":
  184. return "KUAISHOU_XCX"
  185. elif crawler == "weishi":
  186. return "WEISHI"
  187. elif crawler == "xiaoniangao":
  188. return "XIAONIANGAO_XCX"
  189. elif crawler == "benshanzhufu":
  190. return "BENSHANZHUFU"
  191. elif crawler == "gongzhonghao_xinxin":
  192. return "GONGZHONGHAO_XINXIN"
  193. elif crawler == 'shipinhao':
  194. return 'SHIPINHAO_XCX'
  195. elif crawler == 'xigua':
  196. return 'XIGUA'
  197. elif crawler == 'zhihu':
  198. return 'ZHIHU'
  199. elif crawler == 'jixiangxingfu':
  200. return 'JIXIANGXINGFU'
  201. elif crawler == 'zhongmiaoyinxin':
  202. return 'ZHONGMIAOYINXIN'
  203. elif crawler == 'suisuiniannianyingfuqi':
  204. return 'SUISUINIANNIANYINGFUQI'
  205. elif crawler == 'zhufumao':
  206. return 'ZHUFUMAO'
  207. elif crawler == 'zongjiao':
  208. return 'ZONGJIAO'
  209. elif crawler == 'haokan':
  210. return 'HAOKAN'
  211. elif crawler == 'kandaojiushifuqi':
  212. return 'KANDAOJIUSHIFUQI'
  213. elif crawler == 'shengshengyingyin':
  214. return 'SHENGSHENGYINGYIN'
  215. elif crawler == 'ganggangdouchuan':
  216. return 'GANGGANGDOUCHUAN'
  217. elif crawler == 'gongzhonghao_xinxin':
  218. return 'GONGZHONGHAO_XINXIN'
  219. elif crawler == 'weixinzhishu':
  220. return 'WEIXINZHISHU'
  221. else:
  222. return "CRAWLER"
  223. @classmethod
  224. def local_file_path(cls, crawler):
  225. local_file_path = f'./{crawler}/videos'
  226. video_file = 'video'
  227. image_file = 'image'
  228. info_file = 'info'
  229. loacl_file_dict = {
  230. 'local_file_path': local_file_path,
  231. 'video_file': video_file,
  232. 'image_file': image_file,
  233. 'info_file': info_file}
  234. return loacl_file_dict
  235. @classmethod
  236. def upload_and_publish(cls, log_type, crawler, strategy, our_uid, env, oss_endpoint):
  237. """
  238. 上传视频到 oss
  239. :param log_type: 选择的 log
  240. :param crawler: 哪款爬虫
  241. :param env: 测试环境:dev,正式环境:prod
  242. :param our_uid: 站内 UID
  243. :param strategy: 榜单类型
  244. :param oss_endpoint: 内网:inner;外网:out
  245. """
  246. Common.logger(log_type, crawler).info("upload_and_publish starting...")
  247. today = time.strftime("%Y%m%d", time.localtime())
  248. # videos 目录下的所有视频文件夹
  249. files = os.listdir(cls.local_file_path(crawler)["local_file_path"])
  250. for fv in files:
  251. try:
  252. # 单个视频文件夹
  253. fi_d = os.path.join(cls.local_file_path(crawler)["local_file_path"], fv)
  254. # 确认为视频文件夹
  255. if os.path.isdir(fi_d):
  256. Common.logger(log_type, crawler).info('dir = {}'.format(fi_d))
  257. # 列出所有视频文件夹
  258. dir_files = os.listdir(fi_d)
  259. data = {'appType': '888888',
  260. 'crawlerSrcCode': cls.crawlersrccode(crawler),
  261. 'viewStatus': '1',
  262. 'versionCode': '1'}
  263. now_timestamp = int(round(time.time() * 1000))
  264. data['crawlerTaskTimestamp'] = str(now_timestamp)
  265. data['loginUid'] = cls.uids(crawler, strategy, our_uid, env)
  266. # 单个视频文件夹下的所有视频文件
  267. for fi in dir_files:
  268. # 视频文件夹下的所有文件路径
  269. fi_path = fi_d + '/' + fi
  270. Common.logger(log_type, crawler).info('dir fi_path = {}'.format(fi_path))
  271. # 读取 info.txt,赋值给 data
  272. if cls.local_file_path(crawler)["info_file"] in fi:
  273. f = open(fi_path, "r", encoding="UTF-8")
  274. # 读取数据 数据准确性写入的时候保证 读取暂不处理
  275. for i in range(14):
  276. line = f.readline()
  277. line = line.replace('\n', '')
  278. if line is not None and len(line) != 0 and not line.isspace():
  279. # Common.logger(log_type, crawler).info("line = {}".format(line))
  280. if i == 0:
  281. data['crawlerSrcId'] = line
  282. elif i == 1:
  283. data['title'] = line
  284. elif i == 2:
  285. data['totalTime'] = line
  286. elif i == 8:
  287. data['crawlerSrcPublishTimestamp'] = line
  288. else:
  289. Common.logger(log_type, crawler).warning("{} line is None".format(fi_path))
  290. f.close()
  291. # remove info.txt
  292. cls.remove_local_file(log_type, crawler, fi_path)
  293. # 刷新数据
  294. dir_files = os.listdir(fi_d)
  295. for fi in dir_files:
  296. fi_path = fi_d + '/' + fi
  297. # Common.logger(log_type, crawler).info('dir fi_path = {}'.format(fi_path))
  298. # 上传oss
  299. if cls.local_file_path(crawler)["video_file"] in fi:
  300. global oss_video_file
  301. if env == "dev":
  302. oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
  303. elif env == "prod":
  304. oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
  305. Common.logger(log_type, crawler).info("oss_video_file = {}".format(oss_video_file))
  306. cls.put_file(log_type, crawler, oss_endpoint, oss_video_file, fi_path)
  307. data['videoPath'] = oss_video_file
  308. Common.logger(log_type, crawler).info("videoPath = {}".format(oss_video_file))
  309. elif cls.local_file_path(crawler)["image_file"] in fi:
  310. global oss_image_file
  311. if env == "dev":
  312. oss_image_file = cls.oss_file_path_image.format("env", today, data['crawlerSrcId'])
  313. elif env == "prod":
  314. oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
  315. Common.logger(log_type, crawler).info("oss_image_file = {}".format(oss_image_file))
  316. cls.put_file(log_type, crawler, oss_endpoint, oss_image_file, fi_path)
  317. data['coverImgPath'] = oss_image_file
  318. Common.logger(log_type, crawler).info("coverImgPath = {}".format(oss_image_file))
  319. # 全部remove
  320. cls.remove_local_file(log_type, crawler, fi_path)
  321. # 发布
  322. if env == "dev":
  323. video_id = cls.publish_video_dev(log_type, crawler, data)
  324. elif env == "prod":
  325. video_id = cls.publish_video_prod(log_type, crawler, data)
  326. else:
  327. video_id = cls.publish_video_dev(log_type, crawler, data)
  328. cls.remove_local_file_dir(log_type, crawler, fi_d)
  329. Common.logger(log_type, crawler).info('video_id:{}', video_id)
  330. return video_id
  331. else:
  332. Common.logger(log_type, crawler).error('file not a dir = {}'.format(fi_d))
  333. except Exception as e:
  334. # 删除视频文件夹
  335. shutil.rmtree(f"./{crawler}/videos/{fv}/")
  336. Common.logger(log_type, crawler).exception('upload_and_publish error', e)