ai_tag_task.py

import os
import json
import datetime
import traceback
from threading import Timer
from utils import data_check, get_feature_data, asr_validity_discrimination
from whisper_asr import get_whisper_asr
from gpt_tag import request_gpt
from config import set_config
from log import Log

config_ = set_config()
log_ = Log()
features = ['videoid', 'title', 'video_path']
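
# Overview of the per-video pipeline implemented below (summarised from this
# file's own code, not from external documentation):
#   1. Transcribe the video with Whisper ASR (get_whisper_asr).
#   2. Check whether the transcript is usable (asr_validity_discrimination).
#   3. Clean the transcript: drop newlines and configured stop words, keep only
#      the last 2500 characters to stay within the GPT token budget.
#   4. Ask GPT for a summary and keywords, then ask it again to assign
#      categories; categories with confidence > 0.5 are kept.
#   5. Collect the prompts and results into a row and write it to a Feishu sheet.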

def get_video_ai_tags(video_id, video_file, video_info):
    try:
        log_message = {
            'videoId': int(video_id),
        }
        title = video_info.get('title')
        log_message['videoPath'] = video_info.get('video_path')
        log_message['title'] = video_info.get('title')
        # 1. ASR: transcribe the video audio
        asr_res_initial = get_whisper_asr(video=video_file)
        log_message['asrRes'] = asr_res_initial
        # 2. Check whether the ASR transcript is usable
        validity = asr_validity_discrimination(text=asr_res_initial)
        log_message['asrValidity'] = validity
        if validity is True:
            # 3. Clean the ASR transcript
            asr_res = asr_res_initial.replace('\n', '')
            for stop_word in config_.STOP_WORDS:
                asr_res = asr_res.replace(stop_word, '')
            # Token limit: keep at most the last 2500 characters
            asr_res = asr_res[-2500:]
            # 4. Produce results with GPT
            # 4.1 GPT produces the summary and keywords
            prompt1 = f"{config_.GPT_PROMPT['tags']['prompt6']}{asr_res.strip()}"
            log_message['gptPromptSummaryKeywords'] = prompt1
            gpt_res1 = request_gpt(prompt=prompt1)
            log_message['gptResSummaryKeywords'] = gpt_res1
            # `result` is never initialized in the original file; the row layout
            # used here is an assumption inferred from how it is extended below.
            result = [[str(validity), prompt1, gpt_res1]]
            if gpt_res1 is not None:
                # 4.2 Classify using the summary, keywords and title
                try:
                    gpt_res1_json = json.loads(gpt_res1)
                    summary = gpt_res1_json['summary']
                    keywords = gpt_res1_json['keywords']
                    log_message['summary'] = summary
                    log_message['keywords'] = keywords
                    prompt2_param = f"标题:{title}\n概况:{summary}\n关键词:{keywords}"
                    prompt2 = f"{config_.GPT_PROMPT['tags']['prompt7']}{prompt2_param}"
                    log_message['gptPromptTag'] = prompt2
                    gpt_res2 = request_gpt(prompt=prompt2)
                    log_message['gptResTag'] = gpt_res2
                    if gpt_res2 is not None:
                        confidence_up_list = []
                        try:
                            for item in json.loads(gpt_res2):
                                if item['confidence'] > 0.5:
                                    confidence_up_list.append(item['category'])
                        except Exception:
                            pass
                        confidence_up = ', '.join(confidence_up_list)
                        result[0].extend([prompt2, gpt_res2, confidence_up])
                except Exception:
                    result[0].extend(['', '', '', '', ''])
        else:
            result = [[str(validity), '', '', '', '', '', '', '']]
        log_.info(f"result = {result}")
        if len(result) > 0:
            # NOTE: feishu_helper and the sheet/row/column variables below are not
            # defined in this file; they are assumed to be provided elsewhere in
            # the original project (module-level globals or config values).
            feishu_helper.update_values(
                sheet_token=res_spreadsheet_token,
                sheet_id=res_sheet_id,
                data=result,
                start_row=write_start_row,
                start_column=write_start_col,
                end_column=write_end_col
            )
            log_.info("write to feishu success!")
            write_start_row += 1
    except Exception as e:
        log_.error(e)
        log_.error(traceback.format_exc())
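
# For reference, the JSON shapes that get_video_ai_tags() expects back from
# request_gpt() (inferred from the parsing above; the exact fields are dictated
# by the prompts in config_.GPT_PROMPT, so treat these examples as assumptions):
#   prompt6 response: {"summary": "...", "keywords": ["...", "..."]}
#   prompt7 response: [{"category": "...", "confidence": 0.87}, ...]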

def ai_tags(project, table, dt):
    # Fetch the feature data
    feature_df = get_feature_data(project=project, table=table, dt=dt, features=features)
    video_id_list = feature_df['videoid'].to_list()
    video_info = {}
    for video_id in video_id_list:
        title = feature_df[feature_df['videoid'] == video_id]['title'].values[0]
        video_path = feature_df[feature_df['videoid'] == video_id]['video_path'].values[0]
        if title is None:
            continue
        title = title.strip()
        if len(title) > 0:
            video_info[video_id] = {'title': title, 'video_path': video_path}
            # print(video_id, title)
    print(len(video_info))
    # Walk the already-downloaded videos
    download_folder = 'videos'
    video_folder_list = os.listdir(download_folder)
    for video_id in video_folder_list:
        if video_id not in video_id_list:
            continue
        if video_info.get(video_id, None) is None:
            os.rmdir(os.path.join(download_folder, video_id))
        else:
            video_folder = os.path.join(download_folder, video_id)
            for filename in os.listdir(video_folder):
                video_type = filename.split('.')[-1]
                if video_type in ['mp4', 'm3u8']:
                    video_file = os.path.join(video_folder, filename)
                    get_video_ai_tags(video_id=video_id, video_file=video_file,
                                      video_info=video_info.get(video_id))
    print(video_folder_list)
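
# ai_tags() assumes the download directory is laid out as videos/<videoid>/<file>,
# where <file> ends in .mp4 or .m3u8, e.g. videos/12345/video.mp4 (the concrete
# names in this example are illustrative, not taken from the project).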

def timer_check():
    try:
        project = config_.DAILY_VIDEO['project']
        table = config_.DAILY_VIDEO['table']
        now_date = datetime.datetime.today()
        print(f"now_date: {datetime.datetime.strftime(now_date, '%Y%m%d')}")
        dt = datetime.datetime.strftime(now_date - datetime.timedelta(days=1), '%Y%m%d')
        # Check whether the data is ready
        data_count = data_check(project=project, table=table, dt=dt)
        if data_count > 0:
            print(f'videos count = {data_count}')
            # Data is ready: run AI tagging on the videos
            ai_tags(project=project, table=table, dt=dt)
            print("videos ai tag finished!")
        else:
            # Data not ready yet: check again in one minute
            Timer(60, timer_check).start()
    except Exception as e:
        print(f"Video AI tagging failed, exception: {e}, traceback: {traceback.format_exc()}")


if __name__ == '__main__':
    timer_check()