publish.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/6/1
import json
import os
import random
import sys
import time

import oss2
import requests
import urllib3

sys.path.append(os.getcwd())
from main.common import Common

proxies = {"http": None, "https": None}


class Publish:
    @classmethod
    def publish_video_dev(cls, log_type, request_data):
        """
        Publish a video through the dev (test) API.

        Request fields:
        loginUid                    in-site uid (picked at random)
        appType                     default: 888888
        crawlerSrcId                off-site video ID
        crawlerSrcCode              channel (custom, e.g. KYK)
        crawlerSrcPublishTimestamp  original publish time of the video
        crawlerTaskTimestamp        crawler task creation time (can be the current time)
        videoPath                   OSS path of the video file
        coverImgPath                OSS path of the video cover image
        title                       title
        totalTime                   video duration
        viewStatus                  video validity status, default 1
        versionCode                 version, default 1
        :return: video_id
        """
        Common.logger(log_type).info('publish request data: {}'.format(request_data))
        result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
        Common.logger(log_type).info('publish result: {}'.format(result))
        # Check the response code before touching "data": on failure the payload may not contain it
        if result['code'] != 0:
            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
            return None
        video_id = result["data"]["id"]
        Common.logger(log_type).info('video_id: {}'.format(video_id))
        Common.logger(log_type).info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
        return video_id

    @classmethod
    def publish_video_prod(cls, log_type, request_data):
        """
        Publish a video through the production API.

        Request fields: same as publish_video_dev.
        :return: video_id
        """
        Common.logger(log_type).info(f'publish request data: {request_data}')
        result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
        Common.logger(log_type).info(f'publish result: {result}')
        if result['code'] != 0:
            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
            return None
        video_id = result["data"]["id"]
        Common.logger(log_type).info(f'video_id: {video_id}')
        Common.logger(log_type).info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
        return video_id

    @classmethod
    def request_post(cls, request_url, request_data):
        """
        POST to an HTTP API endpoint.
        :param request_url: endpoint URL
        :param request_data: request parameters
        :return: parsed JSON response, or None on a non-200 status
        """
        urllib3.disable_warnings()
        response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
        if response.status_code == 200:
            res_data = json.loads(response.text)
            return res_data
        return None
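
    # Sketch of the response shape the callers above assume (inferred from
    # this file, not from API documentation): a JSON object such as
    #   {"code": 0, "msg": "success", "data": {"id": 12345678}}
    # where a non-zero "code" signals failure and "data" may then be absent.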

    @classmethod
    def bucket(cls, oss_endpoint):
        """
        Create an OSS bucket client.
        :param oss_endpoint: inner: internal network; out: public network; hk: Hong Kong (accelerated)
        :return: bucket
        """
        # The bucket object below drives basic upload/download/list/delete
        # operations. First initialise AccessKeyId, AccessKeySecret and
        # Endpoint, either from environment variables or by replacing
        # placeholders such as "<your AccessKeyId>" with real values.
        #
        # Taking the Hangzhou region as an example, the endpoint can be:
        #   http://oss-cn-hangzhou.aliyuncs.com
        #   https://oss-cn-hangzhou.aliyuncs.com
        # for access over HTTP and HTTPS respectively.
        access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
        access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
        bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
        # OSS internal endpoint
        if oss_endpoint == 'inner':
            endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
        # OSS public endpoint
        elif oss_endpoint == 'out':
            endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
        # OSS accelerated endpoint
        elif oss_endpoint == 'hk':
            endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-accelerate.aliyuncs.com')
        # Default to the public endpoint
        else:
            endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
        # Make sure none of the parameters above are still placeholders
        for param in (access_key_id, access_key_secret, bucket_name, endpoint):
            assert '<' not in param, 'please set the parameter: ' + param
        # Create the Bucket object; all object-level operations go through it
        bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
        return bucket
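
    # Minimal usage sketch (hypothetical object key and local path; assumes
    # valid credentials via the OSS_TEST_* environment variables):
    #   bucket = Publish.bucket('out')
    #   bucket.put_object_from_file('longvideo/crawler_local/video/dev/20230601/demo_id',
    #                               './videos/demo/video.mp4')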
  120. """
  121. 处理流程:
  122. 1. 定时(每天凌晨1点执行一次)循环files文件下的内容 结构:files -> 视频文件夹 -> 视频文件 + 封面图 + 基本信息
  123. 2. 视频文件和封面上传到oss
  124. - 视频文件oss目录 longvideo/crawler_local/video/prod/文件名
  125. - 视频封面oss目录 longvideo/crawler_local/image/prod/文件名
  126. 3. 发布视频
  127. - 读取 基本信息 调用发布接口
  128. """
  129. # env 日期20220225 文件名
  130. oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
  131. oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'
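    # For example (illustrative values):
    #   oss_file_path_video.format('prod', '20230601', 'demo_id')
    #   -> 'longvideo/crawler_local/video/prod/20230601/demo_id'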

    @classmethod
    def put_file(cls, log_type, oss_endpoint, oss_file, local_file):
        cls.bucket(oss_endpoint).put_object_from_file(oss_file, local_file)
        Common.logger(log_type).info("put oss file = {}, local file = {} success".format(oss_file, local_file))

    # Remove a local file
    @classmethod
    def remove_local_file(cls, log_type, local_file):
        os.remove(local_file)
        Common.logger(log_type).info("remove local file = {} success".format(local_file))

    # Remove a local directory
    @classmethod
    def remove_local_file_dir(cls, log_type, local_file):
        os.rmdir(local_file)
        Common.logger(log_type).info("remove local file dir = {} success".format(local_file))

    # In-site UIDs
    @classmethod
    def uids(cls, crawler, strategy, our_uid, env):
        """
        Pick an in-site UID.
        :param crawler: which crawler
        :param env: which environment
        :param strategy: ranking/strategy type; can also be a specific in-site UID
        :param our_uid: upload under this specific in-site UID
        :return: uid
        """
        # if env == 'dev':
        #     uids_dev = [6267140, 6267141]
        #     return random.choice(uids_dev)

        # xiaoniangao
        if crawler == 'xiaoniangao' and env == 'prod' and strategy == '定向爬虫策略':
            uids_prod_xiaoniangao_follow = [50322210, 50322211, 50322212, 50322213, 50322214, 50322215,
                                            50322216, 50322217, 50322218, 50322219, 50322220, 50322221,
                                            50322236, 50322237]
            return random.choice(uids_prod_xiaoniangao_follow)
        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '小时榜爬虫策略':
            uids_prod_xiaoniangao_hour = [50322226, 50322227, 50322228, 50322229]
            return random.choice(uids_prod_xiaoniangao_hour)
        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '播放量榜爬虫策略':
            uids_prod_xiaoniangao_play = [50322222, 50322223, 50322224, 50322225]
            return random.choice(uids_prod_xiaoniangao_play)
        elif crawler == 'kanyikan' and env == 'prod' and strategy == '推荐抓取策略':
            uids_prod_kanyikan_recommend = [20631208, 20631209, 20631210, 20631211, 20631212,
                                            20631213, 20631214, 20631215, 20631216, 20631217,
                                            20631223, 20631224, 20631225, 20631226, 20631227]
            return random.choice(uids_prod_kanyikan_recommend)
        elif crawler == 'kanyikan' and env == 'prod' and strategy == '朋友圈抓取策略':
            uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
                                         20631213, 20631214, 20631215, 20631216, 20631217,
                                         20631223, 20631224, 20631225, 20631226, 20631227]
            return random.choice(uids_prod_kanyikan_moment)
        # elif crawler == 'gongzhonghao' and env == 'prod' and strategy == '定向爬虫策略':
        #     uids_prod_gongzhonghao_follow = [26117675, 26117676, 26117677, 26117678, 26117679, 26117680]
        #     return random.choice(uids_prod_gongzhonghao_follow)
        #
        # elif crawler == 'xigua' and env == 'prod' and strategy == '推荐榜爬虫策略':
        #     uids_prod_gongzhonghao_follow = [50322238]
        #     return random.choice(uids_prod_gongzhonghao_follow)
        # elif crawler == 'benshanzhufu' and env == 'prod' and strategy == '推荐榜爬虫策略':
        #     uids_prod_benshanzhufu_recommend = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]
        #     return random.choice(uids_prod_benshanzhufu_recommend)
        # elif crawler == 'suisuiniannianyingfuqi' and env == 'prod' and strategy == '推荐榜爬虫策略':
        #     uids_prod_suisuiniannianyingfuqi_recommend = [26117547, 26117548, 26117549, 26117550, 26117551]
        #     return random.choice(uids_prod_suisuiniannianyingfuqi_recommend)
        elif crawler == 'ganggangdouchuan' and env == 'prod' and strategy == '推荐榜爬虫策略':
            uids_prod_ganggangdouchuan_recommend = [26117661, 26117662, 26117663]
            return random.choice(uids_prod_ganggangdouchuan_recommend)
        elif crawler == 'jixiangxingfu' and env == 'prod' and strategy == '推荐榜爬虫策略':
            uids_prod_jixiangxingfu_recommend = [26117478, 26117479, 26117480, 26117471, 26117473, 26117474, 26117475, 26117476, 26117477]
            return random.choice(uids_prod_jixiangxingfu_recommend)
        elif crawler == 'zhongmiaoyinxin' and env == 'prod' and strategy == '推荐榜爬虫策略':
            uids_prod_zhongmiaoyinxin_recommend = [26117493, 26117494, 26117495, 26117496, 26117497, 26117498]
            return random.choice(uids_prod_zhongmiaoyinxin_recommend)
        elif crawler == 'zhiqingtiantiankan' and env == 'prod' and strategy == '推荐榜爬虫策略':
            uids_prod_zhiqingtiantiankan_recommend = [20631253, 20631254, 20631255, 20631256, 20631257, 20631258, 20631259, 20631260, 20631261]
            return random.choice(uids_prod_zhiqingtiantiankan_recommend)
        else:
            return our_uid

    # Crawler channel codes
    @classmethod
    def crawlersrccode(cls, crawler):
        # Map each crawler to its channel code; unknown crawlers fall back to
        # the generic "CRAWLER" code. zhiqingzongqun and zhiqingtiantiankan
        # share the same channel code.
        codes = {
            'youtube': 'YOUTUBE',
            'kuaishou': 'KUAISHOU_XCX',
            'xiaoniangao': 'XIAONIANGAO_XCX',
            'gongzhonghao': 'GONGZHONGHAO_XINXIN',
            'xigua': 'XIGUA',
            'weixinzhishu': 'WEIXINZHISHU',
            'douyin': 'DOUYIN',
            'benshanzhufu': 'BENSHANZHUFU',
            'suisuiniannianyingfuqi': 'SUISUINIANNIANYINGFUQI',
            'jixiangxingfu': 'JIXIANGXINGFU',
            'ganggangdouchuan': 'GANGGANGDOUCHUAN',
            'zhongmiaoyinxin': 'ZHONGMIAOYINXIN',
            'zhiqingzongqun': 'ZHIQINGZONGQUN',
            'zhiqingtiantiankan': 'ZHIQINGZONGQUN',
            'kanyikan': 'KANYIKAN',
            'weishi': 'WEISHI',
            'shipinhao': 'SHIPINHAO_XCX',
            'zhihu': 'ZHIHU',
            'zhufumao': 'ZHUFUMAO',
            'zongjiao': 'ZONGJIAO',
            'haokan': 'HAOKAN',
            'kandaojiushifuqi': 'KANDAOJIUSHIFUQI',
            'shengshengyingyin': 'SHENGSHENGYINGYIN',
        }
        return codes.get(crawler, 'CRAWLER')

    @classmethod
    def local_file_path(cls):
        # local_file_path = f'./{crawler}/videos'
        local_file_path = './videos'
        video_file = 'video'
        image_file = 'image'
        info_file = 'info'
        local_file_dict = {
            'local_file_path': local_file_path,
            'video_file': video_file,
            'image_file': image_file,
            'info_file': info_file}
        return local_file_dict
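
    # Assumed on-disk layout (inferred from upload_and_publish below; the
    # actual file names come from the crawler that writes them):
    #   ./videos/<video_dir>/video...  -> uploaded as videoPath
    #   ./videos/<video_dir>/image...  -> uploaded as coverImgPath
    #   ./videos/<video_dir>/info...   -> metadata, one field per line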

    @classmethod
    def upload_and_publish(cls, log_type, crawler, strategy, our_uid, env, oss_endpoint):
        """
        Upload a video to OSS and publish it.
        :param log_type: which log to write to
        :param crawler: which crawler
        :param env: test environment: dev; production environment: prod
        :param our_uid: in-site UID
        :param strategy: ranking/strategy type
        :param oss_endpoint: internal network: inner; public network: out
        """
        Common.logger(log_type).info("upload_and_publish starting...")
        today = time.strftime("%Y%m%d", time.localtime())
        # All per-video directories under the videos directory
        files = os.listdir(cls.local_file_path()["local_file_path"])
        for fv in files:
            try:
                # A single video directory
                fi_d = os.path.join(cls.local_file_path()["local_file_path"], fv)
                # Make sure it really is a directory
                if os.path.isdir(fi_d):
                    Common.logger(log_type).info('dir = {}'.format(fi_d))
                    # List everything inside the video directory
                    dir_files = os.listdir(fi_d)
                    data = {'appType': '888888',
                            'crawlerSrcCode': cls.crawlersrccode(crawler),
                            'viewStatus': '1',
                            'versionCode': '1'}
                    now_timestamp = int(round(time.time() * 1000))
                    data['crawlerTaskTimestamp'] = str(now_timestamp)
                    data['loginUid'] = cls.uids(crawler, strategy, our_uid, env)
                    # All files inside this video directory
                    for fi in dir_files:
                        # Full path of each file in the video directory
                        fi_path = fi_d + '/' + fi
                        Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                        # Read info.txt and copy its fields into data
                        if cls.local_file_path()["info_file"] in fi:
                            f = open(fi_path, "r", encoding="UTF-8")
                            # The writer guarantees the data is correct, so no
                            # further validation happens on read. Line layout
                            # (0-based): 0 crawlerSrcId, 1 title, 2 totalTime,
                            # 8 crawlerSrcPublishTimestamp.
                            for i in range(14):
                                line = f.readline()
                                line = line.replace('\n', '')
                                if line is not None and len(line) != 0 and not line.isspace():
                                    # Common.logger(log_type).info("line = {}".format(line))
                                    if i == 0:
                                        data['crawlerSrcId'] = line
                                    elif i == 1:
                                        data['title'] = line
                                    elif i == 2:
                                        data['totalTime'] = line
                                    elif i == 8:
                                        data['crawlerSrcPublishTimestamp'] = line
                                else:
                                    Common.logger(log_type).warning("{} line is None".format(fi_path))
                            f.close()
                            # Remove info.txt once parsed
                            cls.remove_local_file(log_type, fi_path)
                    # Refresh the file list (info.txt is gone now)
                    dir_files = os.listdir(fi_d)
                    for fi in dir_files:
                        fi_path = fi_d + '/' + fi
                        # Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                        # Upload to OSS: dev uploads go under "dev"; prod and
                        # hk share the "prod" prefix
                        if cls.local_file_path()["video_file"] in fi:
                            if env == "dev":
                                oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
                            else:
                                oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
                            Common.logger(log_type).info("oss_video_file = {}".format(oss_video_file))
                            cls.put_file(log_type, oss_endpoint, oss_video_file, fi_path)
                            data['videoPath'] = oss_video_file
                            Common.logger(log_type).info("videoPath = {}".format(oss_video_file))
                        elif cls.local_file_path()["image_file"] in fi:
                            if env == "dev":
                                oss_image_file = cls.oss_file_path_image.format("dev", today, data['crawlerSrcId'])
                            else:
                                oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
                            Common.logger(log_type).info("oss_image_file = {}".format(oss_image_file))
                            cls.put_file(log_type, oss_endpoint, oss_image_file, fi_path)
                            data['coverImgPath'] = oss_image_file
                            Common.logger(log_type).info("coverImgPath = {}".format(oss_image_file))
                        # Remove each local file once it has been handled
                        cls.remove_local_file(log_type, fi_path)
                    # Publish: prod and hk both go through the production API
                    if env in ("prod", "hk"):
                        video_id = cls.publish_video_prod(log_type, data)
                    else:
                        video_id = cls.publish_video_dev(log_type, data)
                    cls.remove_local_file_dir(log_type, fi_d)
                    Common.logger(log_type).info('video_id: {}'.format(video_id))
                    # Only the first video directory is processed per call
                    return video_id
                else:
                    Common.logger(log_type).error('file not a dir = {}'.format(fi_d))
            except Exception as e:
                Common.logger(log_type).exception('upload_and_publish error: {}'.format(e))
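

if __name__ == '__main__':
    # Minimal manual-run sketch. The argument values below are illustrative
    # assumptions, not taken from the original repository; the strategy
    # string must match one of those checked in Publish.uids.
    Publish.upload_and_publish(
        log_type='kanyikan',
        crawler='kanyikan',
        strategy='推荐抓取策略',
        our_uid=20631208,
        env='dev',
        oss_endpoint='out',
    )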