
Add requirements.txt

罗俊辉, 10 months ago
parent commit 7230e32f04
3 files changed, with 60 additions and 83 deletions
  1. applications/textSimilarity.py  (+50 −76)
  2. requirements.txt  (+9 −6)
  3. test/rank_dev.py  (+1 −1)

+ 50 - 76
applications/textSimilarity.py

@@ -1,92 +1,66 @@
 """
 @author: luojunhui
 """
+import time
 import torch
 import numpy as np
 from similarities import BertSimilarity
 
-model = BertSimilarity(model_name_or_path="BAAI/bge-large-zh-v1.5")
 
+# bge_large_zh_v1_5 = 'bge_large_zh_v1_5'
+# text2vec_base_chinese = "text2vec_base_chinese"
+# text2vec_bge_large_chinese = "text2vec_bge_large_chinese"
 
-bge_large_zh_v1_5 = 'bge_large_zh_v1_5'
-text2vec_base_chinese = "text2vec_base_chinese"
-text2vec_bge_large_chinese = "text2vec_bge_large_chinese"
 
+class NLPFunction(object):
+    """
+    NLP Task
+    """
 
-def get_sim_score_by_pair(model, pair):
-    try:
-        score_tensor = model.similarity(pair['text_a'], pair['text_b'])
-        return score_tensor.squeeze().tolist()
-    except Exception as e:
-        raise
-
-
-def get_sim_score_by_pair_list(model, pair_list):
-    try:
-        res = [get_sim_score_by_pair(model, pair) for pair in pair_list['text_pair_list']]
-        return res
-    except Exception as e:
-        raise
+    def __init__(self, model):
+        self.model = model
 
+    def base_string_similarity(self, text_dict):
+        """
+        Basic function: compute the similarity between two strings
+        :param text_dict: dict with keys 'text_a' and 'text_b'
+        :return: similarity score as a float
+        """
+        score_tensor = self.model.similarity(
+            text_dict['text_a'],
+            text_dict['text_b']
+        )
+        return score_tensor.squeeze().tolist()
 
-def get_sim_score_by_list_pair(model, list_pair):
-    try:
-        score_tensor = model.similarity(list_pair['text_list_a'], list_pair['text_list_b'])
+    def base_list_similarity(self, pair_list_dict):
+        """
+        Compute pairwise similarity between two lists of strings
+        :return: 2-D list of scores with shape (len(text_list_a), len(text_list_b))
+        """
+        score_tensor = self.model.similarity(
+            pair_list_dict['text_list_a'],
+            pair_list_dict['text_list_b']
+        )
         return score_tensor.tolist()
-    except Exception as e:
-        raise
-
-
-def get_sim_score_max(model, data):
-    try:
-        score_list_max = []
-        text_list_max = []
-        score_array = get_sim_score_by_list_pair(model, data)
-        text_list_a, text_list_b = data['text_list_a'], data['text_list_b']
-        for i, row in enumerate(score_array):
-            max_index = np.argmax(row)
-            max_value = row[max_index]
-            score_list_max.append(max_value)
-            text_list_max.append(text_list_b[max_index])
-        return score_list_max, text_list_max, score_array
-    except Exception as e:
-        logger.error(f"Error in get_sim_score_max: {e}")
-        raise
-
-
-def score_to_attention(score, symbol=1):
-    try:
-        score_pred = torch.FloatTensor(score).unsqueeze(0)
-        score_norm = symbol * torch.nn.functional.normalize(score_pred, p=2, dim=1)
-        score_attn = torch.nn.functional.softmax(score_norm, dim=1)
-        return score_attn, score_norm, score_pred
-    except Exception as e:
-        logger.error(f"Error in score_to_attention: {e}")
-        raise
-
-
-def get_sim_score_avg(model, data):
-    try:
-        text_list_a, text_list_b = data['text_list_a'], data['text_list_b']
-        score_list_b, symbol = data['score_list_b'], data['symbol']
-        score_list_max, text_list_max, score_array = get_sim_score_max(model, data)
-        score_attn, score_norm, score_pred = score_to_attention(score_list_b, symbol=symbol)
-        score_tensor = torch.tensor(score_array)
-        score_res = torch.matmul(score_tensor, score_attn.transpose(0, 1))
-        score_list = score_res.squeeze(-1).tolist()
-        return score_list, text_list_max, score_array
-    except Exception as e:
-        logger.error(f"Error in get_sim_score_avg: {e}")
-        raise
 
 
-def get_sim_score_mean(model, data):
-    try:
-        text_list_a, text_list_b = data['text_list_a'], data['text_list_b']
-        score_list_max, text_list_max, score_array = get_sim_score_max(model, data)
-        score_tensor = torch.tensor(score_array)
-        score_res = torch.mean(score_tensor, dim=1)
-        score_list = score_res.tolist()
-        return score_list, text_list_max, score_array
-    except Exception as e:
-        raise
+if __name__ == '__main__':
+    a = time.time()
+    m = BertSimilarity(model_name_or_path="BAAI/bge-large-zh-v1.5")
+    b = time.time()
+    print("模型加载时间:\t", b - a)
+    NF = NLPFunction(m)
+    td = {
+        "text_a": "王者荣耀",
+        "text_b": "斗罗大陆"
+    }
+    tld = {
+        "text_list_a": ["凯旋", "圣洁", "篮球"],
+        "text_list_b": ["胜利", "纯洁", "足球"]
+    }
+    # res = NF.base_string_similarity(text_dict=td)
+    res = NF.base_list_similarity(pair_list_dict=tld)
+    c = time.time()
+    print("计算时间:\t", c - b)
+    for i in res:
+        print(i)

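The refactor replaces the module-level model load and the free scoring functions with an NLPFunction class that receives the model through its constructor, so the caller decides when the slow model load happens. Below is a minimal sketch of how the class might be exposed from a Quart service (quart is pinned in requirements.txt further down); the route name, payload shape, and port are assumptions, not part of this commit.

```python
# Hypothetical wiring of the new NLPFunction class into a Quart endpoint.
# Route name, payload shape, and port are assumptions; only NLPFunction and
# BertSimilarity come from this commit.
from quart import Quart, jsonify, request
from similarities import BertSimilarity

from applications.textSimilarity import NLPFunction

app = Quart(__name__)
# Load the model once at startup instead of once per request.
nlp = NLPFunction(BertSimilarity(model_name_or_path="BAAI/bge-large-zh-v1.5"))


@app.route("/similarity", methods=["POST"])
async def similarity():
    # Expected body: {"text_a": "...", "text_b": "..."}
    payload = await request.get_json()
    return jsonify({"score": nlp.base_string_similarity(payload)})


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=6060)
```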
+ 9 - 6
requirements.txt

@@ -1,6 +1,6 @@
 aiofiles
 aiohttp
-aiomysql
+aiomysql~=0.2.0
 aiosignal
 alembic
 aliyun-log-python-sdk
@@ -9,7 +9,7 @@ httpx
 huggingface-hub
 hypercorn
 matplotlib
-numpy
+numpy~=1.26.4
 odps
 openai
 openpyxl
@@ -18,10 +18,13 @@ packaging
 pandas
 pymysql
 pyodps
-quart
-requests
+quart~=0.19.6
+requests~=2.32.3
 scipy
 selenium
-torch
-tqdm
+torch~=2.3.1
+tqdm~=4.66.4
 transformers
+
+pydantic~=2.6.4
+similarities~=1.1.7

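The dependency pins switch from bare names to PEP 440 compatible-release specifiers: numpy~=1.26.4, for example, accepts any 1.26.x release at or above 1.26.4 but rejects 1.27.0. A small check with the packaging library (already listed in requirements.txt) illustrates the semantics; the version strings tested are arbitrary examples.

```python
# Illustration of the "~=" (compatible release) specifier used in requirements.txt.
# The version strings below are arbitrary examples.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.26.4")   # equivalent to ">=1.26.4, ==1.26.*"
print(spec.contains("1.26.5"))    # True  - patch upgrade allowed
print(spec.contains("1.27.0"))    # False - minor bump rejected
print(spec.contains("1.26.3"))    # False - below the stated floor
```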
+ 1 - 1
test/rank_dev.py

@@ -2,7 +2,7 @@ import json
 import requests
 import time
 
-url = "http://61.48.133.26:6060/articleRank"
+url = "http://47.98.154.124:6060/articleRank"
 with open("body.json") as f:
     data = json.loads(f.read())
 body = json.loads(data['data'])
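The diff for test/rank_dev.py only swaps the target host; the rest of the script is not shown here. For orientation, a hypothetical continuation that posts the loaded body and prints the response; the POST call, timeout, and output formatting are assumptions, not part of the commit.

```python
# Hypothetical continuation of test/rank_dev.py; not part of this commit.
start = time.time()
response = requests.post(url, json=body, timeout=60)
print("status:", response.status_code)
print("elapsed:", time.time() - start)
print(json.dumps(response.json(), ensure_ascii=False, indent=2))
```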