# newContentIdTask.py
  1. """
  2. @author: luojunhui
  3. """
  4. import json
  5. import time
  6. import asyncio
  7. from applications.config import Config
  8. from applications.log import logging
  9. from applications.functions.pqFunctions import publish_to_pq
  10. from applications.functions.common import shuffle_list
  11. from applications.functions.kimi import KimiServer
  12. from applications.spider import search_videos_from_web
  13. from applications.etl_function import *
  14. class NewContentIdTask(object):
  15. """
  16. 不存在历史已经发布的文章的匹配流程
  17. """
  18. def __init__(self, mysql_client):
  19. self.mysql_client = mysql_client
  20. self.config = Config()
  21. self.article_match_video_table = self.config.article_match_video_table
  22. self.article_text_table = self.config.article_text_table
  23. self.article_crawler_video_table = self.config.article_crawler_video_table
  24. self.gh_id_dict = json.loads(self.config.get_config_value("testAccountLevel2"))
  25. self.account_map = json.loads(self.config.get_config_value("accountMap"))
  26. self.spider_coroutines = self.config.get_config_value("spiderCoroutines")
  27. self.default_status = 0
  28. self.task_processing_status = 101
  29. self.task_defeat_status = 99
  30. self.article_text_table_error = 4
  31. self.max_process_times = 3
  32. async def get_tasks(self):
  33. """
  34. 获取 task
  35. :return:
  36. """
  37. # 获取 content_status 为 处理中 的任务,判断时间, 如果超过 1h 则,则将它改为 0, process_times + 1
  38. select_processing_sql = f"""
  39. SELECT trace_id, content_status_update_time, process_times
  40. FROM {self.article_match_video_table}
  41. WHERE content_status = {self.task_processing_status} and process_times <= {self.max_process_times};
  42. """
  43. processing_articles = await self.mysql_client.async_select(select_processing_sql)
  44. if processing_articles:
  45. processing_list = [
  46. {
  47. "trace_id": item[0],
  48. "content_status_update_time": item[1],
  49. "process_times": item[2]
  50. }
  51. for item in processing_articles
  52. ]
  53. for obj in processing_list:
  54. if int(time.time()) - obj['content_status_update_time'] >= 3600:
  55. # 认为该任务失败
  56. await self.roll_back_content_status_when_fails(
  57. process_times=obj['process_times'] + 1,
  58. trace_id=obj['trace_id']
  59. )
  60. # 将 process_times > 3 的任务的状态修改为失败
  61. update_status_sql = f"""
  62. UPDATE {self.article_match_video_table}
  63. SET content_status = %s
  64. WHERE process_times > %s;
  65. """
  66. await self.mysql_client.async_insert(
  67. update_status_sql,
  68. params=(self.task_defeat_status, self.max_process_times)
  69. )
  70. # 获取 process_times <= 3 且 content_status = 0 的任务
  71. select_sql = f"""
  72. SELECT trace_id, content_id, flow_pool_level, gh_id, process_times
  73. FROM {self.article_match_video_table}
  74. WHERE content_status = {self.default_status} and process_times <= {self.max_process_times}
  75. limit {self.spider_coroutines};
  76. """
  77. tasks = await self.mysql_client.async_select(select_sql)
  78. if tasks:
  79. return [
  80. {
  81. "trace_id": i[0],
  82. "content_id": i[1],
  83. "flow_pool_level": i[2],
  84. "gh_id": i[3],
  85. "process_times": i[4]
  86. }
  87. for i in tasks
  88. ]
  89. else:
  90. return []
  91. async def get_video_list(self, content_id):
  92. """
  93. 判断该文章是否存在历史匹配视频
  94. :param content_id
  95. :return:
  96. """
  97. sql = f"""
  98. SELECT id
  99. FROM {self.article_crawler_video_table}
  100. WHERE content_id = '{content_id}' and download_status = 2;
  101. """
  102. res_tuple = await self.mysql_client.async_select(sql)
  103. if len(res_tuple) >= 3:
  104. return True
  105. else:
  106. return False
  107. async def update_content_status(self, new_content_status, trace_id, ori_content_status):
  108. """
  109. :param new_content_status:
  110. :param trace_id:
  111. :param ori_content_status:
  112. :return:
  113. """
  114. update_sql = f"""
  115. UPDATE {self.article_match_video_table}
  116. SET content_status = %s, content_status_update_time = %s
  117. WHERE trace_id = %s and content_status = %s;
  118. """
  119. await self.mysql_client.async_insert(
  120. sql=update_sql,
  121. params=(
  122. new_content_status,
  123. int(time.time()),
  124. trace_id,
  125. ori_content_status
  126. )
  127. )
  128. async def roll_back_content_status_when_fails(self, process_times, trace_id):
  129. """
  130. 处理失败,回滚至初始状态,处理次数加 1
  131. :param process_times:
  132. :param trace_id:
  133. :return:
  134. """
  135. update_article_sql = f"""
  136. UPDATE {self.article_match_video_table}
  137. SET
  138. content_status = %s,
  139. content_status_update_time = %s,
  140. process_times = %s,
  141. WHERE trace_id = %s and content_status = %s;
  142. """
  143. await self.mysql_client.async_insert(
  144. sql=update_article_sql,
  145. params=(
  146. self.default_status,
  147. int(time.time()),
  148. process_times + 1,
  149. trace_id,
  150. self.task_processing_status
  151. )
  152. )
  153. async def judge_whether_same_content_id_is_processing(self, content_id):
  154. """
  155. 同一个 content_id只需要处理一次
  156. :param content_id:
  157. :return:
  158. """
  159. select_sql = f"""
  160. SELECT distinct content_status
  161. FROM {self.article_match_video_table}
  162. WHERE content_id = '{content_id}';
  163. """
  164. result = await self.mysql_client.async_select(select_sql)
  165. if result:
  166. for item in result:
  167. content_status = item[0]
  168. if content_status != self.default_status:
  169. return True
  170. return False
  171. else:
  172. return False
  173. async def get_downloaded_videos(self, content_id):
  174. """
  175. 获取已下载的视频
  176. :return:
  177. """
  178. sql = f"""
  179. SELECT platform, play_count, like_count, video_oss_path, cover_oss_path, user_id
  180. FROM {self.article_crawler_video_table}
  181. WHERE content_id = '{content_id}' and download_status = 2;
  182. """
  183. res_tuple = await self.mysql_client.async_select(sql)
  184. return [
  185. {
  186. "platform": i[0],
  187. "play_count": i[1],
  188. "like_count": i[2],
  189. "video_oss_path": i[3],
  190. "cover_oss_path": i[4],
  191. "uid": i[5]
  192. }
  193. for i in res_tuple]
  194. async def get_kimi_status(self, content_id):
  195. """
  196. 通过 content_id 获取kimi info
  197. :return:
  198. """
  199. select_sql = f"""
  200. select kimi_status
  201. from {self.article_text_table}
  202. where content_id = '{content_id}';
  203. """
  204. response = await self.mysql_client.async_select(select_sql)
  205. if response:
  206. kimi_status = response[0][0]
  207. return kimi_status
  208. else:
  209. return self.article_text_table_error
  210. async def kimi_task(self, params):
  211. """
  212. 执行 kimi 任务
  213. :return:
  214. """
  215. kimi_success_status = 1
  216. kimi_fail_status = 2
  217. content_id = params['content_id']
  218. trace_id = params['trace_id']
  219. process_times = params['process_times']
  220. kimi_status_code = await self.get_kimi_status(content_id=content_id)
  221. if kimi_status_code == kimi_success_status:
  222. await self.update_content_status(
  223. new_content_status=kimi_success_status,
  224. trace_id=trace_id,
  225. ori_content_status=self.default_status
  226. )
  227. """
  228. {
  229. "kimi_title": kimi_title,
  230. "ori_title": article_obj['article_title'],
  231. "kimi_summary": content_title,
  232. "kimi_keys": kimi_info['content_keys']
  233. }
  234. """
  235. get_kimi_sql = f"""
  236. SELECT article_title, kimi_title, kimi_summary, kimi_keys
  237. FROM {self.article_text_table}
  238. WHERE content_id = '{content_id}';
  239. """
  240. kimi_info = await self.mysql_client.async_select(get_kimi_sql)
  241. return {
  242. "kimi_title": kimi_info[0][1],
  243. "ori_title": kimi_info[0][0],
  244. "kimi_summary": kimi_info[0][2],
  245. "kimi_keys": json.loads(kimi_info[0][3])
  246. }
  247. elif kimi_status_code == self.article_text_table_error:
  248. """
  249. todo: 文章表和匹配表没有同步更新,暂时不处理此次任务
  250. """
  251. print("article_text表还没有更新")
  252. else:
  253. # 开始处理,讲 content_status 从 0 改为 101
  254. await self.update_content_status(
  255. new_content_status=self.task_processing_status,
  256. trace_id=trace_id,
  257. ori_content_status=self.default_status
  258. )
  259. K = KimiServer()
  260. try:
  261. select_sql = f"""
  262. select article_title, article_text
  263. from {self.article_text_table}
  264. where content_id = '{content_id}'
  265. """
  266. res = await self.mysql_client.async_select(select_sql)
  267. article_obj = {
  268. "article_title": res[0][0],
  269. "article_text": res[0][1],
  270. "content_id": content_id
  271. }
  272. kimi_info = await K.search_kimi_schedule(params=article_obj)
  273. kimi_title = kimi_info['k_title']
  274. content_title = kimi_info['content_title'].replace("'", "").replace('"', "")
  275. content_keys = json.dumps(kimi_info['content_keys'], ensure_ascii=False)
  276. update_kimi_sql = f"""
  277. UPDATE {self.article_text_table}
  278. SET
  279. kimi_title = %s,
  280. kimi_summary = %s,
  281. kimi_keys = %s,
  282. kimi_status = %s
  283. WHERE content_id = %s;"""
  284. await self.mysql_client.async_insert(
  285. sql=update_kimi_sql,
  286. params=(kimi_title, content_title, content_keys, kimi_success_status, params['content_id'])
  287. )
  288. await self.update_content_status(
  289. new_content_status=kimi_success_status,
  290. trace_id=trace_id,
  291. ori_content_status=self.task_processing_status
  292. )
  293. return {
  294. "kimi_title": kimi_title,
  295. "ori_title": article_obj['article_title'],
  296. "kimi_summary": content_title,
  297. "kimi_keys": kimi_info['content_keys']
  298. }
  299. except Exception as e:
  300. # kimi 任务处理失败
  301. update_kimi_sql = f"""
  302. UPDATE {self.article_text_table}
  303. SET
  304. kimi_status = %s
  305. WHERE content_id = %s
  306. """
  307. await self.mysql_client.async_insert(
  308. sql=update_kimi_sql,
  309. params=(kimi_fail_status, content_id)
  310. )
  311. # 将状态由 101 回退为 0
  312. await self.roll_back_content_status_when_fails(
  313. process_times=process_times,
  314. trace_id=trace_id
  315. )
  316. return {}
  317. async def spider_task(self, params, kimi_result):
  318. """
  319. 爬虫任务
  320. :return:
  321. todo: 任务执行之前加一个判断,判断是存在 3 条以上的视频已经被抓取
  322. """
  323. spider_default_status = 1
  324. spider_success_status = 2
  325. trace_id = params['trace_id']
  326. content_id = params['content_id']
  327. process_times = params['process_times']
  328. gh_id = params['gh_id']
  329. select_sql = f"""
  330. select count(id) from {self.article_crawler_video_table} where content_id = '{content_id}';
  331. """
  332. count_tuple = await self.mysql_client.async_select(select_sql)
  333. counts = count_tuple[0][0]
  334. if counts >= 3:
  335. await self.update_content_status(
  336. new_content_status=spider_success_status,
  337. trace_id=trace_id,
  338. ori_content_status=spider_default_status
  339. )
  340. return True
  341. try:
  342. # 开始处理,将状态由 1 改成 101
  343. await self.update_content_status(
  344. new_content_status=self.task_processing_status,
  345. ori_content_status=spider_default_status,
  346. trace_id=trace_id
  347. )
  348. search_videos_count = await search_videos_from_web(
  349. info={
  350. "ori_title": kimi_result['ori_title'],
  351. "kimi_summary": kimi_result['kimi_summary'],
  352. "kimi_keys": kimi_result['kimi_keys'],
  353. "trace_id": trace_id,
  354. "gh_id": gh_id,
  355. "content_id": content_id,
  356. "crawler_video_table": self.article_crawler_video_table
  357. },
  358. gh_id_map=self.account_map,
  359. db_client=self.mysql_client
  360. )
  361. if search_videos_count >= 3:
  362. # 表示爬虫任务执行成功, 将状态从 101 改未 2
  363. await self.update_content_status(
  364. new_content_status=spider_success_status,
  365. trace_id=trace_id,
  366. ori_content_status=self.task_processing_status
  367. )
  368. return True
  369. else:
  370. await self.roll_back_content_status_when_fails(
  371. process_times=process_times + 1,
  372. trace_id=trace_id
  373. )
  374. return False
  375. except Exception as e:
  376. await self.roll_back_content_status_when_fails(
  377. process_times=process_times + 1,
  378. trace_id=trace_id
  379. )
  380. print("爬虫处理失败: {}".format(e))
  381. return False
  382. async def etl_task(self, params):
  383. """
  384. download && upload videos
  385. :param params:
  386. :return:
  387. """
  388. video_download_success_status = 2
  389. video_download_fail_status = 3
  390. etl_task_default_status = 2
  391. etl_task_success_status = 3
  392. trace_id = params['trace_id']
  393. content_id = params['content_id']
  394. # 判断是否有三条已经下载完成的视频
  395. select_sql = f"""
  396. select count(id)
  397. from {self.article_crawler_video_table}
  398. where content_id = '{content_id}' and download_status = {video_download_success_status};
  399. """
  400. video_count_tuple = await self.mysql_client.async_select(select_sql)
  401. video_count = video_count_tuple[0][0]
  402. if video_count > 3:
  403. await self.update_content_status(
  404. ori_content_status=etl_task_default_status,
  405. trace_id=trace_id,
  406. new_content_status=etl_task_success_status
  407. )
  408. return True
  409. else:
  410. # 开始处理, 将文章状态修改为处理状态
  411. await self.update_content_status(
  412. ori_content_status=etl_task_default_status,
  413. trace_id=trace_id,
  414. new_content_status=self.task_processing_status
  415. )
  416. select_sql = f"""
  417. SELECT id, out_video_id, platform, video_title, video_url, cover_url, user_id, trace_id
  418. FROM {self.article_crawler_video_table}
  419. WHERE content_id = '{content_id}' and download_status != {video_download_success_status}
  420. ORDER BY score DESC;
  421. """
  422. videos_need_to_download_tuple = await self.mysql_client.async_select(select_sql)
  423. downloaded_count = 0
  424. for line in videos_need_to_download_tuple:
  425. params = {
  426. "id": line[0],
  427. "video_id": line[1],
  428. "platform": line[2],
  429. "video_title": line[3],
  430. "video_url": line[4],
  431. "cover_url": line[5],
  432. "user_id": line[6],
  433. "trace_id": line[7]
  434. }
  435. try:
  436. local_video_path, local_cover_path = generate_video_path(params['platform'], params['video_id'])
  437. # download videos
  438. file_path = await download_video(
  439. file_path=local_video_path,
  440. platform=params['platform'],
  441. video_url=params['video_url']
  442. )
  443. # download cover
  444. cover_path = await download_cover(
  445. file_path=local_cover_path,
  446. platform=params['platform'],
  447. cover_url=params['cover_url']
  448. )
  449. oss_video = await upload_to_oss(
  450. local_video_path=file_path,
  451. download_type="video"
  452. )
  453. if cover_path:
  454. oss_cover = await upload_to_oss(
  455. local_video_path=cover_path,
  456. download_type="image"
  457. )
  458. else:
  459. oss_cover = None
  460. update_sql = f"""
  461. UPDATE {self.article_crawler_video_table}
  462. SET video_oss_path = %s, cover_oss_path = %s, download_status = %s
  463. WHERE id = %s;
  464. """
  465. await self.mysql_client.async_insert(
  466. sql=update_sql,
  467. params=(
  468. oss_video,
  469. oss_cover,
  470. video_download_success_status,
  471. params['id']
  472. )
  473. )
  474. downloaded_count += 1
  475. if downloaded_count > 3:
  476. await self.update_content_status(
  477. ori_content_status=self.task_processing_status,
  478. trace_id=trace_id,
  479. new_content_status=etl_task_success_status
  480. )
  481. return True
  482. except Exception as e:
  483. update_sql = f"""
  484. UPDATE {self.article_crawler_video_table}
  485. SET download_status = %s
  486. WHERE id = %s;
  487. """
  488. await self.mysql_client.async_insert(
  489. sql=update_sql,
  490. params=(video_download_fail_status, params['id'])
  491. )
  492. if downloaded_count >= 3:
  493. await self.update_content_status(
  494. ori_content_status=self.task_processing_status,
  495. trace_id=trace_id,
  496. new_content_status=etl_task_success_status
  497. )
  498. return True
  499. else:
  500. await self.roll_back_content_status_when_fails(
  501. process_times=params['process_times'] + 1,
  502. trace_id=params['trace_id']
  503. )
  504. return False
  505. async def publish_task(self, params, kimi_title):
  506. """
  507. 发布任务
  508. :param kimi_title:
  509. :param params:
  510. :return:
  511. """
  512. publish_default_status = 3
  513. publish_success_status = 4
  514. gh_id = params['gh_id']
  515. flow_pool_level = params['flow_pool_level']
  516. content_id = params['content_id']
  517. trace_id = params['trace_id']
  518. process_times = params['process_times']
  519. # 开始处理,将状态修改为操作状态
  520. await self.update_content_status(
  521. ori_content_status=publish_default_status,
  522. trace_id=trace_id,
  523. new_content_status=self.task_processing_status
  524. )
  525. try:
  526. download_videos = await self.get_downloaded_videos(content_id)
  527. match flow_pool_level:
  528. case "autoArticlePoolLevel4":
  529. # 冷启层, 全量做
  530. video_list = shuffle_list(download_videos)[:3]
  531. case "autoArticlePoolLevel3":
  532. if self.gh_id_dict.get(gh_id):
  533. video_list = shuffle_list(download_videos)[:3]
  534. else:
  535. video_list = download_videos[:3]
  536. case "autoArticlePoolLevel2":
  537. # 次条,只针对具体账号做
  538. video_list = []
  539. case "autoArticlePoolLevel1":
  540. # 头条,先不做
  541. video_list = download_videos[:3]
  542. case _:
  543. video_list = download_videos[:3]
  544. L = []
  545. for video_obj in video_list:
  546. params = {
  547. "videoPath": video_obj['video_oss_path'],
  548. "uid": video_obj['uid'],
  549. "title": kimi_title
  550. }
  551. response = await publish_to_pq(params)
  552. time.sleep(2)
  553. obj = {
  554. "uid": video_obj['uid'],
  555. "source": video_obj['platform'],
  556. "kimiTitle": kimi_title,
  557. "videoId": response['data']['id'],
  558. "videoCover": response['data']['shareImgPath'],
  559. "videoPath": response['data']['videoPath'],
  560. "videoOss": video_obj['video_oss_path']
  561. }
  562. L.append(obj)
  563. update_sql = f"""
  564. UPDATE {self.article_match_video_table}
  565. SET content_status = %s, response = %s, process_times = %s
  566. WHERE trace_id = %s and content_status = %s;
  567. """
  568. # 从操作中状态修改为已发布状态
  569. await self.mysql_client.async_insert(
  570. sql=update_sql,
  571. params=(
  572. publish_success_status,
  573. json.dumps(L, ensure_ascii=False),
  574. process_times + 1,
  575. trace_id,
  576. self.task_processing_status
  577. )
  578. )
  579. except Exception as e:
  580. await self.roll_back_content_status_when_fails(
  581. process_times=params['process_times'] + 1,
  582. trace_id=params['trace_id']
  583. )
  584. print(e)
  585. async def start_process(self, params):
  586. """
  587. 处理单篇文章
  588. :param params:
  589. :return:
  590. """
  591. # step1: 执行 kimi 操作
  592. kimi_result = await self.kimi_task(params)
  593. if kimi_result:
  594. # 等待 kimi 操作执行完成之后,开始执行 spider_task
  595. print("kimi success")
  596. spider_flag = await self.spider_task(params=params, kimi_result=kimi_result)
  597. if spider_flag:
  598. # 等待爬虫执行完成后,开始执行 etl_task
  599. print("spider success")
  600. etl_flag = await self.etl_task(params)
  601. if etl_flag:
  602. # 等待下载上传完成,执行发布任务
  603. print("etl success")
  604. try:
  605. await self.publish_task(params, kimi_result['kimi_title'])
  606. except Exception as e:
  607. logging(
  608. code="9001",
  609. info="publish 失败--{}".format(e),
  610. trace_id=params['trace_id']
  611. )
  612. else:
  613. logging(
  614. code="8001",
  615. info="ETL 处理失败",
  616. trace_id=params['trace_id']
  617. )
  618. else:
  619. logging(
  620. code="7002",
  621. info="爬虫处理失败",
  622. trace_id=params['trace_id']
  623. )
  624. else:
  625. logging(
  626. code="6001",
  627. info="kimi 处理失败",
  628. trace_id=params['trace_id']
  629. )
  630. async def process_task(self, params):
  631. """
  632. 处理任务
  633. :return:
  634. """
  635. content_id = params['content_id']
  636. download_videos = await self.get_video_list(content_id)
  637. if not download_videos:
  638. # 开始处理, 判断是否有相同的文章 id 正在处理
  639. processing_flag = await self.judge_whether_same_content_id_is_processing(content_id)
  640. if processing_flag:
  641. logging(
  642. code="9001",
  643. info="该 content id 正在处理中, 跳过此任务"
  644. )
  645. else:
  646. await self.start_process(params=params)
  647. else:
  648. print("存在已下载视频")
  649. async def deal(self):
  650. """
  651. function
  652. :return:
  653. """
  654. task_list = await self.get_tasks()
  655. print(task_list)
  656. logging(
  657. code="5001",
  658. info="Match Task Got {} this time".format(len(task_list)),
  659. function="Publish Task"
  660. )
  661. if task_list:
  662. tasks = [self.process_task(params) for params in task_list]
  663. await asyncio.gather(*tasks)
  664. else:
  665. logging(
  666. code="9008",
  667. info="没有要处理的请求"
  668. )