@@ -4,6 +4,7 @@ import tempfile
 import face_recognition
 import requests
 import logging
+import jieba.analyse

 from flask import Flask, request, jsonify, json
@@ -56,12 +57,21 @@ def create_app():
         temp_file = tempfile.NamedTemporaryFile(delete=False)
         file.save(temp_file.name)
         find = find_faces_in_image(temp_file.name, file.filename)
-        os.remove(temp_file.name)# delete the temporary file
+        os.remove(temp_file.name) # delete the temporary file
         results = return_json(find)
         return results
         return jsonify({'error': 'An error occurred'}), 500
-    return app
+
+    @app.route('/title/split', methods=['POST'])
+    def get_title_split():
+        if 'title' not in request.json:
+            return jsonify({'error': 'No title provided'}), 400
+        title = request.json.get('title')
+        if not title:
+            return jsonify({'error': 'Empty title provided'}), 400
+        return evaluate(title, 3)
+
+    return app


 # Call the setup_logging function to create and configure the logger
@@ -87,8 +97,10 @@ def setup_logging():
     return logger

+
 logger = setup_logging()

+
 def return_json(find):
     result = {
         "code": 0,
@@ -104,8 +116,6 @@ def return_json(find):
     return json_result


-
-
 # Global variables that store the known face encodings and names
 known_face_encodings = []
 known_face_names = []
@@ -124,6 +134,7 @@ def load_known_faces(known_faces_dir):
         known_face_encodings.append(face_encoding)
         known_face_names.append(os.path.splitext(filename)[0])

+
 def get_face_similarity(known_face_encoding, face_encoding):
     # Compute the distance between the two faces' encodings
     face_distances = face_recognition.face_distance([known_face_encoding], face_encoding)
@@ -131,6 +142,7 @@ def get_face_similarity(known_face_encoding, face_encoding):
     similarity = 1 - face_distances
     return similarity

+
 def find_faces_in_image(image_path, url):
     image = face_recognition.load_image_file(image_path)
     face_locations = face_recognition.face_locations(image)
@@ -152,7 +164,39 @@ def find_faces_in_image(image_path, url):
     return False


-
+def load_top_tags():
+    kv = {}
+    try:
+        with open('top_tags.txt', 'r', encoding='utf-8') as cache_file:
+            for index, line in enumerate(cache_file):
+                if index == 10000:
+                    break
+                line = line.strip()
+                if not line:
+                    continue
+                k, _ = line.split('\t')
+                kv[k] = index
+    except FileNotFoundError:
+        print("top_tags.txt file not found.")
+    return kv
+
+
+kv = load_top_tags()
+
+
+def evaluate(title, top_size):
+    if title is None or len(title) == 0:
+        return ""
+    keys = jieba.analyse.extract_tags(title, topK=top_size, withWeight=False, allowPOS=('n', 'v'))
+    if keys is None or len(keys) == 0:
+        return ""
+    keys_filter = []
+    for k in keys:
+        if k in kv:
+            keys_filter.append(k)
+    if len(keys_filter) == 0:
+        return ""
+    return ",".join(keys_filter)


 if __name__ == '__main__':
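
For reference, a standalone sketch of the keyword-extraction step that the new evaluate() relies on. The sample title is purely illustrative, jieba must be installed (pip install jieba), and load_top_tags() expects each line of top_tags.txt to carry a tag followed by a tab (only the text before the first tab is used).

# Standalone sketch of the extraction call used by evaluate(); the title is a made-up example.
import jieba.analyse

title = '人工智能技术在人脸识别中的应用'
# Same call as in evaluate(): up to 3 keywords, restricted to nouns ('n') and verbs ('v').
keys = jieba.analyse.extract_tags(title, topK=3, withWeight=False, allowPOS=('n', 'v'))
print(keys)  # a list of up to 3 keyword strings; evaluate() then keeps only those present in kv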
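
A minimal client sketch for exercising the new /title/split route once the app is running; the host and port below are assumptions (Flask's development defaults), not something this patch sets.

# Hypothetical client call; adjust the URL to the actual deployment.
import requests

resp = requests.post('http://127.0.0.1:5000/title/split',
                     json={'title': '人工智能技术在人脸识别中的应用'})
print(resp.status_code, resp.text)  # a comma-separated keyword string, or an error JSON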