# hybrid_similarity.py
  1. #!/usr/bin/env python3
  2. """
  3. 混合相似度计算模块
  4. 结合向量模型(text_embedding)和LLM模型(semantic_similarity)的结果
  5. 提供2种接口:
  6. 1. compare_phrases() - 单对计算
  7. 2. compare_phrases_cartesian() - 笛卡尔积批量计算 (M×N)
  8. """
  9. from typing import Dict, Any, Optional, List
  10. import asyncio
  11. import numpy as np
  12. from lib.text_embedding import compare_phrases as compare_phrases_embedding
  13. from lib.text_embedding_api import compare_phrases_cartesian as compare_phrases_cartesian_api
  14. from lib.semantic_similarity import compare_phrases as compare_phrases_semantic
  15. from lib.semantic_similarity import compare_phrases_cartesian as compare_phrases_cartesian_semantic
  16. from lib.config import get_cache_dir
  17. async def compare_phrases(
  18. phrase_a: str,
  19. phrase_b: str,
  20. weight_embedding: float = 0.5,
  21. weight_semantic: float = 0.5,
  22. embedding_model: str = "chinese",
  23. semantic_model: str = 'openai/gpt-4.1-mini',
  24. use_cache: bool = True,
  25. cache_dir_embedding: Optional[str] = None,
  26. cache_dir_semantic: Optional[str] = None,
  27. **semantic_kwargs
  28. ) -> Dict[str, Any]:
  29. """
  30. 混合相似度计算:同时使用向量模型和LLM模型,按权重组合结果
  31. Args:
  32. phrase_a: 第一个短语
  33. phrase_b: 第二个短语
  34. weight_embedding: 向量模型权重,默认 0.5
  35. weight_semantic: LLM模型权重,默认 0.5
  36. embedding_model: 向量模型名称,默认 "chinese"
  37. semantic_model: LLM模型名称,默认 'openai/gpt-4.1-mini'
  38. use_cache: 是否使用缓存,默认 True
  39. cache_dir_embedding: 向量模型缓存目录,默认从配置读取
  40. cache_dir_semantic: LLM模型缓存目录,默认从配置读取
  41. **semantic_kwargs: 其他传递给semantic_similarity的参数
  42. - temperature: 温度参数,默认 0.0
  43. - max_tokens: 最大token数,默认 65536
  44. - prompt_template: 自定义提示词模板
  45. - instructions: Agent系统指令
  46. - tools: Agent工具列表
  47. - name: Agent名称
  48. Returns:
  49. {
  50. "相似度": float, # 加权平均后的相似度 (0-1)
  51. "说明": str # 综合说明(包含各模型的分数和说明)
  52. }
  53. Examples:
  54. >>> # 使用默认权重 (0.5:0.5)
  55. >>> result = await compare_phrases("深度学习", "神经网络")
  56. >>> print(result['相似度']) # 加权平均后的相似度
  57. 0.82
  58. >>> # 自定义权重,更倾向向量模型
  59. >>> result = await compare_phrases(
  60. ... "深度学习", "神经网络",
  61. ... weight_embedding=0.7,
  62. ... weight_semantic=0.3
  63. ... )
  64. >>> # 使用不同的模型
  65. >>> result = await compare_phrases(
  66. ... "深度学习", "神经网络",
  67. ... embedding_model="multilingual",
  68. ... semantic_model="anthropic/claude-sonnet-4.5"
  69. ... )
  70. """
  71. # 验证权重
  72. total_weight = weight_embedding + weight_semantic
  73. if abs(total_weight - 1.0) > 0.001:
  74. raise ValueError(f"权重之和必须为1.0,当前为: {total_weight}")
  75. # 使用配置的缓存目录(如果未指定)
  76. if cache_dir_embedding is None:
  77. cache_dir_embedding = get_cache_dir("text_embedding")
  78. if cache_dir_semantic is None:
  79. cache_dir_semantic = get_cache_dir("semantic_similarity")
  80. # 并发调用两个模型
  81. embedding_task = asyncio.to_thread(
  82. compare_phrases_embedding,
  83. phrase_a=phrase_a,
  84. phrase_b=phrase_b,
  85. model_name=embedding_model,
  86. use_cache=use_cache,
  87. cache_dir=cache_dir_embedding
  88. )
  89. semantic_task = compare_phrases_semantic(
  90. phrase_a=phrase_a,
  91. phrase_b=phrase_b,
  92. model_name=semantic_model,
  93. use_cache=use_cache,
  94. cache_dir=cache_dir_semantic,
  95. **semantic_kwargs
  96. )
  97. # 等待两个任务完成
  98. embedding_result, semantic_result = await asyncio.gather(
  99. embedding_task,
  100. semantic_task
  101. )
  102. # 提取相似度分数
  103. score_embedding = embedding_result.get("相似度", 0.0)
  104. score_semantic = semantic_result.get("相似度", 0.0)
  105. # 计算加权平均
  106. final_score = (
  107. score_embedding * weight_embedding +
  108. score_semantic * weight_semantic
  109. )
  110. # 生成综合说明(格式化为清晰的结构)
  111. explanation = (
  112. f"【混合相似度】{final_score:.3f}(向量模型权重{weight_embedding},LLM模型权重{weight_semantic})\n\n"
  113. f"【向量模型】相似度={score_embedding:.3f}\n"
  114. f"{embedding_result.get('说明', 'N/A')}\n\n"
  115. f"【LLM模型】相似度={score_semantic:.3f}\n"
  116. f"{semantic_result.get('说明', 'N/A')}"
  117. )
  118. # 构建返回结果(与原接口完全一致)
  119. return {
  120. "相似度": final_score,
  121. "说明": explanation
  122. }
  123. async def compare_phrases_cartesian(
  124. phrases_a: List[str],
  125. phrases_b: List[str],
  126. max_concurrent: int = 50
  127. ) -> List[List[Dict[str, Any]]]:
  128. """
  129. 混合相似度笛卡尔积批量计算:M×N矩阵
  130. 结合向量模型API笛卡尔积(快速)和LLM并发调用(已优化)
  131. 使用默认权重:向量0.5,LLM 0.5
  132. Args:
  133. phrases_a: 第一组短语列表(M个)
  134. phrases_b: 第二组短语列表(N个)
  135. max_concurrent: 最大并发数,默认50(控制LLM调用并发)
  136. Returns:
  137. 嵌套列表 List[List[Dict]],每个Dict包含完整结果
  138. results[i][j] = {
  139. "相似度": float, # 混合相似度
  140. "说明": str # 包含向量和LLM的详细说明
  141. }
  142. Examples:
  143. >>> results = await compare_phrases_cartesian(
  144. ... ["深度学习"],
  145. ... ["神经网络", "Python"]
  146. ... )
  147. >>> print(results[0][0]['相似度']) # 混合相似度
  148. >>> print(results[0][1]['说明']) # 完整说明
  149. >>> # 自定义并发控制
  150. >>> results = await compare_phrases_cartesian(
  151. ... ["深度学习"],
  152. ... ["神经网络", "Python"],
  153. ... max_concurrent=100 # 提高并发数
  154. ... )
  155. """
  156. # 参数验证
  157. if not phrases_a or not phrases_b:
  158. return [[]]
  159. M, N = len(phrases_a), len(phrases_b)
  160. # 默认权重
  161. weight_embedding = 0.5
  162. weight_semantic = 0.5
  163. # 并发执行两个任务
  164. # 1. 向量模型:使用API笛卡尔积(一次调用获取M×N完整结果)
  165. embedding_task = asyncio.to_thread(
  166. compare_phrases_cartesian_api,
  167. phrases_a,
  168. phrases_b,
  169. max_concurrent # 传递并发参数(API不使用,但保持接口一致)
  170. )
  171. # 2. LLM模型:使用并发调用(M×N个任务,受max_concurrent控制)
  172. semantic_task = compare_phrases_cartesian_semantic(
  173. phrases_a,
  174. phrases_b,
  175. max_concurrent # 传递并发参数控制LLM调用
  176. )
  177. # 等待两个任务完成
  178. embedding_results, semantic_results = await asyncio.gather(
  179. embedding_task,
  180. semantic_task
  181. )
  182. # embedding_results[i][j] = {"相似度": float, "说明": str}
  183. # semantic_results[i][j] = {"相似度": float, "说明": str}
  184. # 构建嵌套列表,包含完整信息(带子模型详细说明)
  185. nested_results = []
  186. for i in range(M):
  187. row_results = []
  188. for j in range(N):
  189. # 获取子模型的完整结果
  190. embedding_result = embedding_results[i][j]
  191. semantic_result = semantic_results[i][j]
  192. score_embedding = embedding_result.get("相似度", 0.0)
  193. score_semantic = semantic_result.get("相似度", 0.0)
  194. # 计算加权平均
  195. final_score = (
  196. score_embedding * weight_embedding +
  197. score_semantic * weight_semantic
  198. )
  199. # 生成完整说明(包含子模型的详细说明)
  200. explanation = (
  201. f"【混合相似度】{final_score:.3f}(向量模型权重{weight_embedding},LLM模型权重{weight_semantic})\n\n"
  202. f"【向量模型】相似度={score_embedding:.3f}\n"
  203. f"{embedding_result.get('说明', 'N/A')}\n\n"
  204. f"【LLM模型】相似度={score_semantic:.3f}\n"
  205. f"{semantic_result.get('说明', 'N/A')}"
  206. )
  207. row_results.append({
  208. "相似度": final_score,
  209. "说明": explanation
  210. })
  211. nested_results.append(row_results)
  212. return nested_results
  213. def compare_phrases_sync(
  214. phrase_a: str,
  215. phrase_b: str,
  216. weight_embedding: float = 0.5,
  217. weight_semantic: float = 0.5,
  218. **kwargs
  219. ) -> Dict[str, Any]:
  220. """
  221. 混合相似度计算的同步版本(内部创建事件循环)
  222. Args:
  223. phrase_a: 第一个短语
  224. phrase_b: 第二个短语
  225. weight_embedding: 向量模型权重,默认 0.5
  226. weight_semantic: LLM模型权重,默认 0.5
  227. **kwargs: 其他参数(同 compare_phrases)
  228. Returns:
  229. 同 compare_phrases
  230. Examples:
  231. >>> result = compare_phrases_sync("深度学习", "神经网络")
  232. >>> print(result['相似度'])
  233. """
  234. return asyncio.run(
  235. compare_phrases(
  236. phrase_a=phrase_a,
  237. phrase_b=phrase_b,
  238. weight_embedding=weight_embedding,
  239. weight_semantic=weight_semantic,
  240. **kwargs
  241. )
  242. )
  243. if __name__ == "__main__":
  244. async def main():
  245. print("=" * 80)
  246. print("混合相似度计算示例")
  247. print("=" * 80)
  248. print()
  249. # 示例 1: 默认权重 (0.5:0.5)
  250. print("示例 1: 默认权重 (0.5:0.5)")
  251. print("-" * 80)
  252. result = await compare_phrases("深度学习", "神经网络")
  253. print(f"相似度: {result['相似度']:.3f}")
  254. print(f"说明:\n{result['说明']}")
  255. print()
  256. # 示例 2: 不相关的短语
  257. print("示例 2: 不相关的短语")
  258. print("-" * 80)
  259. result = await compare_phrases("编程", "吃饭")
  260. print(f"相似度: {result['相似度']:.3f}")
  261. print(f"说明:\n{result['说明']}")
  262. print()
  263. # 示例 3: 自定义权重,更倾向向量模型
  264. print("示例 3: 自定义权重 (向量:0.7, LLM:0.3)")
  265. print("-" * 80)
  266. result = await compare_phrases(
  267. "人工智能", "机器学习",
  268. weight_embedding=0.7,
  269. weight_semantic=0.3
  270. )
  271. print(f"相似度: {result['相似度']:.3f}")
  272. print(f"说明:\n{result['说明']}")
  273. print()
  274. # 示例 4: 完整输出示例
  275. print("示例 4: 完整输出示例")
  276. print("-" * 80)
  277. result = await compare_phrases("宿命感", "余华的小说")
  278. print(f"相似度: {result['相似度']:.3f}")
  279. print(f"说明:\n{result['说明']}")
  280. print()
  281. # 示例 5: 同步版本
  282. print("示例 5: 同步版本调用")
  283. print("-" * 80)
  284. result = compare_phrases_sync("Python", "编程语言")
  285. print(f"相似度: {result['相似度']:.3f}")
  286. print(f"说明:\n{result['说明']}")
  287. print()
  288. print("=" * 80)
  289. asyncio.run(main())