videos_similarity.py 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138
import datetime
from collections import Counter

import jieba
import numpy as np
import pandas as pd
from odps import ODPS

from config import set_config
from log import Log
from utils import filter_video_status
# Load project configuration and the shared logger once at import time.
config_, _ = set_config()
log_ = Log()
  11. def get_data_from_odps(project, sql):
  12. odps = ODPS(
  13. access_id=config_.ODPS_CONFIG['ACCESSID'],
  14. secret_access_key=config_.ODPS_CONFIG['ACCESSKEY'],
  15. project=project,
  16. endpoint=config_.ODPS_CONFIG['ENDPOINT'],
  17. connect_timeout=3000,
  18. read_timeout=500000,
  19. pool_maxsize=1000,
  20. pool_connections=1000
  21. )
  22. try:
  23. with odps.execute_sql(sql=sql).open_reader() as reader:
  24. data_df = reader.to_pandas()
  25. except Exception as e:
  26. data_df = None
  27. return data_df
  28. def get_word_vector(s1, s2):
  29. """
  30. :param s1: 句子1
  31. :param s2: 句子2
  32. :return: 返回句子的余弦相似度
  33. """
  34. # 分词
  35. cut1 = jieba.lcut(s1, cut_all=False)
  36. cut2 = jieba.lcut(s2, cut_all=False)
  37. list_word1 = (','.join(cut1)).split(',')
  38. list_word2 = (','.join(cut2)).split(',')
  39. # 列出所有的词,取并集
  40. key_word = list(set(list_word1 + list_word2))
  41. # print(key_word)
  42. # 给定形状和类型的用0填充的矩阵存储向量
  43. word_vector1 = np.zeros(len(key_word))
  44. word_vector2 = np.zeros(len(key_word))
  45. # 计算词频
  46. # 依次确定向量的每个位置的值
  47. for i in range(len(key_word)):
  48. # 遍历key_word中每个词在句子中的出现次数
  49. for j in range(len(list_word1)):
  50. if key_word[i] == list_word1[j]:
  51. word_vector1[i] += 1
  52. for k in range(len(list_word2)):
  53. if key_word[i] == list_word2[k]:
  54. word_vector2[i] += 1
  55. # 输出向量
  56. # print(word_vector1)
  57. # print(word_vector2)
  58. return word_vector1, word_vector2
  59. def cos_dist(vec1, vec2):
  60. """
  61. :param vec1: 向量1
  62. :param vec2: 向量2
  63. :return: 返回两个向量的余弦相似度
  64. """
  65. dist1 = float(np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))
  66. return dist1
  67. def get_movie_video_top_list():
  68. sql = "select videoid, title from videoods.movie_store_video_top_list;"
  69. data_df = get_data_from_odps(project='videoods', sql=sql)
  70. movie_videos = dict()
  71. for index, row in data_df.iterrows():
  72. movie_videos[int(row['videoid'])] = row['title']
  73. return movie_videos
  74. def get_sim_videos():
  75. now = datetime.datetime.now()
  76. log_.info(f"now = {datetime.datetime.strftime(now, '%Y-%m-%d %H:%M:%S')}")
  77. sql_create_time = datetime.datetime.strftime(now - datetime.timedelta(days=30), '%Y-%m-%d %H:%M:%S')
  78. if sql_create_time < '2022-04-22 16:40:00':
  79. sql_create_time = '2022-04-22 16:40:00'
  80. sql = f"SELECT video_id, create_time, title FROM videoods.movie_store_video_allow_list_final " \
  81. f"WHERE create_time>='{sql_create_time}';"
  82. data_df = get_data_from_odps(project='videoods', sql=sql)
  83. video_ids = [int(video_id) for video_id in data_df['video_id'].to_list()]
  84. # 对视频状态进行过滤
  85. filtered_videos = filter_video_status(list(video_ids))
  86. sim_videos = dict()
  87. for index, row in data_df.iterrows():
  88. video_id = int(row['video_id'])
  89. if video_id in filtered_videos:
  90. sim_videos[video_id] = row['title']
  91. return sim_videos
  92. def similarity_rank(movie_videos, sim_videos):
  93. sim_result = []
  94. for video_id, title in movie_videos.items():
  95. # item_sim = dict()
  96. for vid, title1 in sim_videos.items():
  97. vec1, vec2 = get_word_vector(title, title1)
  98. dist = cos_dist(vec1, vec2)
  99. if dist > 0:
  100. # item_sim[vid] = dist
  101. item_sim = {'top_video_id': video_id, 'vid': vid, 'dist': dist}
  102. sim_result.append(item_sim)
  103. dist_df = pd.DataFrame(sim_result, columns=['top_video_id', 'vid', 'dist'])
  104. dist_df.to_csv('./data/videos_dist.csv')
  105. # sim_result[video_id] = item_sim
  106. # print(video_id, item_sim)
  107. if __name__ == '__main__':
  108. # str_list = ['S手的生活.2020', '花X道Z', '肉Y不能.法语中字', '窃YU无罪']
  109. # s1 = "杀手的生活"
  110. # for s2 in str_list:
  111. # vec1, vec2 = get_word_vector(s1, s2)
  112. # dist1 = cos_dist(vec1, vec2)
  113. # print(dist1)
  114. movie_videos = get_movie_video_top_list()
  115. sim_videos = get_sim_videos()
  116. print(len(movie_videos), len(sim_videos))
  117. similarity_rank(movie_videos=movie_videos, sim_videos=sim_videos)