# ai_tag_task.py
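"""Daily video AI-tagging task.

Reads the day's video feature data (videoid, title, video_path), fetches an
ASR transcript for each video, asks GPT for a summary, keywords, category and
audience attributes, and stores the parsed result via insert_content. The
timer_check() entry point re-checks every minute until the data is ready.
"""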

import os
import shutil
import json
import datetime
import time
import traceback
import requests
import multiprocessing
import ODPSQueryUtil
from threading import Timer
from utils import data_check, get_feature_data, asr_validity_discrimination
from whisper_asr import get_whisper_asr
from gpt_tag import request_gpt
from config import set_config
from log import Log
from result_save import insert_content

config_ = set_config()
log_ = Log()

features = ['videoid', 'title', 'video_path']


def get_video_ai_tags(video_id, video_info):
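    """Produce AI tags for a single video.

    video_info is expected to carry 'title', 'video_path' and a pre-fetched
    ASR transcript under 'asr_res'. The transcript is cleaned and sent to GPT
    to get a summary and keywords, then three follow-up prompts are issued;
    their parsed output is stored via insert_content. Intermediate values are
    collected in log_message and logged at the end.
    """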
    try:
        st_time = time.time()
        log_message = {
            'videoId': int(video_id),
        }
        title = video_info.get('title')
        log_message['videoPath'] = video_info.get('video_path')
        log_message['title'] = video_info.get('title')
        # 1. Get the ASR result
        asr_res_initial = video_info.get('asr_res', '')
        log_message['asrRes'] = asr_res_initial
        # 2. Check whether the recognized ASR text is valid
        validity = asr_validity_discrimination(text=asr_res_initial)
        log_message['asrValidity'] = validity
        if validity is True:
            # 3. Clean the ASR result
            asr_res = asr_res_initial.replace('\n', '')
            for stop_word in config_.STOP_WORDS:
                asr_res = asr_res.replace(stop_word, '')
            # Token limit: keep at most the last 2500 characters
            asr_res = asr_res[-2500:]
            # 4. Get GPT results
            # 4.1 GPT produces summary and keywords
            prompt1 = f"{config_.GPT_PROMPT['tags']['prompt6']}{asr_res.strip()}"
            log_message['gptPromptSummaryKeywords'] = prompt1
            gpt_res1 = request_gpt(prompt=prompt1)
            log_message['gptResSummaryKeywords'] = gpt_res1
            if gpt_res1 is not None:
                # 4.2 Classify with the summary, keywords and title
                try:
                    gpt_res1_json = json.loads(gpt_res1)
                    summary = gpt_res1_json['summary']
                    keywords = gpt_res1_json['keywords']
                    log_message['summary'] = summary
                    log_message['keywords'] = str(keywords)
                    # TODO: split the three prompts into three separate requests
                    prompt2_param = f"标题:{title}\n概况:{summary}\n关键词:{keywords}"
                    prompt2 = f"{config_.GPT_PROMPT['tags']['prompt8']}{prompt2_param}"
                    log_message['gptPrompt2'] = prompt2
                    gpt_res2 = request_gpt(prompt=prompt2)
                    log_message['gptRes2'] = gpt_res2
                    prompt3 = f"{config_.GPT_PROMPT['tags']['prompt9']}{prompt2_param}"
                    log_message['gptPrompt3'] = prompt3
                    gpt_res3 = request_gpt(prompt=prompt3)
                    log_message['gptRes3'] = gpt_res3
                    prompt4 = f"{config_.GPT_PROMPT['tags']['prompt10']}{prompt2_param}"
                    log_message['gptPrompt4'] = prompt4
                    gpt_res4 = request_gpt(prompt=prompt4)
                    log_message['gptRes4'] = gpt_res4
                    # 5. Parse the GPT results
                    parseRes = praseGptRes(gpt_res2, gpt_res3, gpt_res4)
                    parseRes['video_id'] = video_id
                    log_message.update(parseRes)
                    # 6. Save the result
                    insert_content(parseRes)
                except Exception:
                    log_.error(traceback.format_exc())
        else:
            pass
        log_message['executeTime'] = (time.time() - st_time) * 1000
        log_.info(log_message)
    except Exception as e:
        log_.error(e)
        log_.error(traceback.format_exc())


def praseGptRes(gpt_res2, gpt_res3, gpt_res4):
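    """Merge the three follow-up GPT responses into a single dict.

    Each response is expected to be a JSON string; responses that are None or
    fail to parse are skipped. The merged keys are key_words, search_keys and
    extra_keys from gpt_res2; tone, target_audience and target_age from
    gpt_res3; category, target_gender, address and theme from gpt_res4.
    """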
    result = {}
    if gpt_res2 is not None:
        try:
            res2 = json.loads(gpt_res2)
            result['key_words'] = res2['key_words']
            result['search_keys'] = res2['search_keys']
            result['extra_keys'] = res2['extra_keys']
        except Exception:
            pass
    if gpt_res3 is not None:
        try:
            res3 = json.loads(gpt_res3)
            result['tone'] = res3['tone']
            result['target_audience'] = res3['target_audience']
            result['target_age'] = res3['target_age']
        except Exception:
            pass
    if gpt_res4 is not None:
        try:
            res4 = json.loads(gpt_res4)
            result['category'] = res4['category']
            result['target_gender'] = res4['target_gender']
            result['address'] = res4['address']
            result['theme'] = res4['theme']
        except Exception:
            pass
    return result


def process(video_id, video_info, download_folder):
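    """Tag one downloaded video folder and clean it up afterwards.

    Looks for an mp4/m3u8 file under download_folder/<video_id>, runs
    get_video_ai_tags for it, and removes the folder whether or not a usable
    file was found. Only referenced by the commented-out multiprocessing
    pool in ai_tags.
    """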
    if video_info.get(video_id, None) is None:
        shutil.rmtree(os.path.join(download_folder, video_id))
    else:
        video_folder = os.path.join(download_folder, video_id)
        for filename in os.listdir(video_folder):
            video_type = filename.split('.')[-1]
            if video_type in ['mp4', 'm3u8']:
                video_file = os.path.join(video_folder, filename)
                get_video_ai_tags(video_id=video_id,
                                  video_info=video_info.get(video_id))
                # Delete the processed video
                shutil.rmtree(os.path.join(download_folder, video_id))
            else:
                shutil.rmtree(os.path.join(download_folder, video_id))


def ai_tags(project, table, dt):
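    """Tag videos from locally downloaded files (older flow).

    Loads the day's feature data, builds a video_id -> info map, then polls
    the local 'videos' download folder (up to 3 retries, one minute apart)
    and tags every downloaded video that appears in the feature data,
    deleting each video folder after it has been handled.
    """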
    # Get the feature data
    feature_df = get_feature_data(
        project=project, table=table, dt=dt, features=features)
    video_id_list = feature_df['videoid'].to_list()
    video_info = {}
    for video_id in video_id_list:
        title = feature_df[feature_df['videoid'] == video_id]['title'].values[0]
        video_path = feature_df[feature_df['videoid'] == video_id]['video_path'].values[0]
        if title is None:
            continue
        title = title.strip()
        if len(title) > 0:
            video_info[video_id] = {'title': title, 'video_path': video_path}
            # print(video_id, title)
    print(len(video_info))
    # Look for already-downloaded videos
    download_folder = 'videos'
    retry = 0
    while retry < 3:
        video_folder_list = os.listdir(download_folder)
        if len(video_folder_list) < 2:
            retry += 1
            time.sleep(60)
            continue
        # pool = multiprocessing.Pool(processes=5)
        # for video_id in video_folder_list:
        #     if video_id not in video_id_list:
        #         continue
        #     pool.apply_async(
        #         func=process,
        #         args=(video_id, video_info, download_folder)
        #     )
        # pool.close()
        # pool.join()
        for video_id in video_folder_list:
            if video_id not in video_id_list:
                continue
            if video_info.get(video_id, None) is None:
                shutil.rmtree(os.path.join(download_folder, video_id))
            else:
                video_folder = os.path.join(download_folder, video_id)
                for filename in os.listdir(video_folder):
                    video_type = filename.split('.')[-1]
                    if video_type in ['mp4', 'm3u8']:
                        video_file = os.path.join(video_folder, filename)
                        get_video_ai_tags(video_id=video_id,
                                          video_info=video_info.get(video_id))
                        # Delete the processed video
                        shutil.rmtree(os.path.join(download_folder, video_id))
                    else:
                        shutil.rmtree(os.path.join(download_folder, video_id))


def get_asr_res(video_id):
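    """Fetch the ASR transcript for a video from the video_to_text service.

    Returns the 'text' field of the JSON response, or '无内容' (no content)
    when the request fails or the field is missing.
    """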
    # URL of the API endpoint
    url = 'http://61.48.133.26:5999/video_to_text'
    # Headers for the request
    headers = {
        'Content-Type': 'application/json'
    }
    # Data to be sent in the request
    data = {
        "video_id": f"{video_id}"
    }
    # Making the POST request
    response = requests.post(url, headers=headers, json=data)
    # Checking if the request was successful
    if response.status_code == 200:
        # Extracting the 'text' field from the JSON response
        result_text = response.json().get('text', '无内容')
        return result_text
    else:
        return '无内容'


def ai_tags_new(project, table, dt):
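    """Tag videos using ASR transcripts from the video_to_text service.

    Loads the day's feature data, skips rows without a usable title, fetches
    each video's ASR transcript via get_asr_res, and runs get_video_ai_tags
    on every video one by one.
    """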
    # Get the feature data
    feature_df = get_feature_data(
        project=project, table=table, dt=dt, features=features)
    video_id_list = feature_df['videoid'].to_list()
    video_info = {}
    for video_id in video_id_list:
        title = feature_df[feature_df['videoid'] == video_id]['title'].values[0]
        video_path = feature_df[feature_df['videoid'] == video_id]['video_path'].values[0]
        if title is None:
            continue
        title = title.strip()
        if len(title) < 1:
            continue
        # Get the ASR result
        asr_res = get_asr_res(video_id)
        video_info[video_id] = {'title': title,
                                'video_path': video_path, 'asr_res': asr_res}
        get_video_ai_tags(video_id=video_id,
                          video_info=video_info.get(video_id))


def timer_check():
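    """Entry point: wait for the day's data, then run the tagging job.

    Checks whether yesterday's partition is ready via data_check and whether
    the local 'asr_res' folder exists; if either is not ready, re-schedules
    itself with a 60-second Timer, otherwise runs ai_tags_new.
    """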
    try:
        project = config_.DAILY_VIDEO['project']
        table = config_.DAILY_VIDEO['table']
        now_date = datetime.datetime.today()
        print(f"now_date: {datetime.datetime.strftime(now_date, '%Y%m%d')}")
        dt = datetime.datetime.strftime(
            now_date - datetime.timedelta(days=1), '%Y%m%d')
        # Check whether the data is ready
        data_count = data_check(project=project, table=table, dt=dt)
        if data_count > 0:
            print(f'videos count = {data_count}')
            asr_folder = 'asr_res'
            if not os.path.exists(asr_folder):
                # Re-check after 1 minute
                Timer(60, timer_check).start()
            else:
                # Data is ready, run the AI tagging
                ai_tags_new(project=project, table=table, dt=dt)
                print("videos ai tag finished!")
        else:
            # Data is not ready, re-check after 1 minute
            Timer(60, timer_check).start()
    except Exception as e:
        print(
            f"Video AI tagging failed, exception: {e}, traceback: {traceback.format_exc()}")


if __name__ == '__main__':
    timer_check()