@@ -20,7 +20,7 @@ logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configuration constants
-BATCH_SIZE = 10  # batch size for chunked processing
+BATCH_SIZE = 5  # batch size for chunked processing
SCORE_THRESHOLD = 70  # score threshold

# Define tools
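
The smaller batch size feeds the offset-based paging that `get_batch_contents_for_evaluation` (next hunk) relies on. A minimal sketch of that paging pattern, with a plain list standing in for database rows (illustration only, not the project's data-access code):

```python
# Minimal sketch of offset/limit paging with the reduced batch size.
BATCH_SIZE = 5

rows = [f"row-{i}" for i in range(12)]  # stand-in for rows pending evaluation

offset = 0
while True:
    batch = rows[offset:offset + BATCH_SIZE]  # analogous to query.offset(offset).limit(BATCH_SIZE)
    if not batch:
        break                                 # mirrors "return when contents is empty"
    print(f"processing {len(batch)} rows at offset {offset}")
    offset += BATCH_SIZE
```
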
@@ -105,25 +105,15 @@ def execute_continuous_evaluation_extraction(request_id: str, db: Session, query
    # This code is never reached, because the while loop returns when contents is empty

def get_batch_contents_for_evaluation(request_id: str, db: Session, batch_size: int, offset: int = 0) -> list:
-    """Fetch contents pending evaluation in batches
-
-    Args:
-        request_id: request ID
-        db: database session
-        batch_size: batch size
-        offset: offset, used for pagination
-
-    Returns:
-        List of contents pending evaluation
-    """
-    query = db.query(KnowledgeParsingContent).filter(
-        KnowledgeParsingContent.status == 2  # data whose extraction is complete
+    query = db.query(KnowledgeParsingContent).outerjoin(
+        KnowledgeExtractionContent,
+        KnowledgeParsingContent.id == KnowledgeExtractionContent.parsing_id
+    ).filter(
+        KnowledgeParsingContent.status == 2,  # data whose extraction is complete
+        KnowledgeParsingContent.request_id == request_id,
+        KnowledgeExtractionContent.parsing_id == None
    )

-    # If a request_id is given, only process data for that request_id
-    if request_id:
-        query = query.filter(KnowledgeParsingContent.request_id == request_id)
-
    return query.offset(offset).limit(batch_size).all()

def batch_evaluate_content(contents: list, db: Session, request_id: str, query_word: str) -> list:
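
The rewritten query is an anti-join: parsed rows are LEFT OUTER JOINed to extraction rows, and only rows with no extraction match survive the `parsing_id == None` filter. Below is a minimal, self-contained sketch of the same pattern in SQLAlchemy; the model columns are simplified stand-ins, not the project's actual schema:

```python
from sqlalchemy import Column, Integer, String, Text, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class KnowledgeParsingContent(Base):
    __tablename__ = "knowledge_parsing_content"
    id = Column(Integer, primary_key=True)
    request_id = Column(String(64))
    status = Column(Integer)      # 2 is assumed to mean "extraction complete"
    parsing_data = Column(Text)

class KnowledgeExtractionContent(Base):
    __tablename__ = "knowledge_extraction_content"
    id = Column(Integer, primary_key=True)
    parsing_id = Column(Integer)  # back-reference to KnowledgeParsingContent.id

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
db = sessionmaker(bind=engine)()

# LEFT OUTER JOIN plus "right side IS NULL" keeps only not-yet-extracted rows.
# .is_(None) is the flake8-friendly spelling of the == None comparison in the hunk above.
pending = (
    db.query(KnowledgeParsingContent)
    .outerjoin(
        KnowledgeExtractionContent,
        KnowledgeParsingContent.id == KnowledgeExtractionContent.parsing_id,
    )
    .filter(
        KnowledgeParsingContent.status == 2,
        KnowledgeParsingContent.request_id == "req-1",
        KnowledgeExtractionContent.parsing_id.is_(None),
    )
    .offset(0)
    .limit(5)
    .all()
)
print(pending)
```
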
@@ -246,22 +236,16 @@ def batch_call_llm_for_evaluation(contents: list, query_word: str) -> list:
    try:
        # Batch-call Gemini for evaluation
        results = gemini_processor.batch_process(evaluation_contents, EVALUATION_PROMPT)
-
+
        # Process the returned results
        evaluation_results = []
        for i, result in enumerate(results):
            result = re.sub(r'^\s*```json|\s*```\s*$', '', result, flags=re.MULTILINE).strip()
+            result = json.loads(result)
            parsing_id = contents[i].id
-            parsing_data = contents[i].parsing_data
-
-            if isinstance(result, dict) and "score" in result:
-                # normal result
-                score = result.get("score", -2)
-                reason = result.get("reason", "")
-            else:
-                # abnormal result
-                score = -2
-                reason = "evaluation failed"
+            parsing_data = contents[i].parsing_data
+            score = result.get("score", -2)
+            reason = result.get("reason", "")

            evaluation_results.append((parsing_id, score, reason, parsing_data))

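
The new parse step assumes each Gemini reply is a JSON object with `score` and `reason` fields once the Markdown fence is stripped. A small sketch of that cleanup-then-parse path on a hypothetical reply (the fence string is assembled with `chr(96)` purely so the sketch contains no literal fence markers):

```python
import json
import re

# Hypothetical model reply: a JSON object wrapped in a Markdown fence.
fence = chr(96) * 3  # three backtick characters
raw = f"{fence}json\n" + '{"score": 85, "reason": "content matches the query"}' + f"\n{fence}"

# Same cleanup-then-parse sequence as the added lines above.
pattern = r"^\s*" + fence + r"json|\s*" + fence + r"\s*$"
cleaned = re.sub(pattern, "", raw, flags=re.MULTILINE).strip()
parsed = json.loads(cleaned)  # raises json.JSONDecodeError if the reply is not valid JSON
score = parsed.get("score", -2)
reason = parsed.get("reason", "")
print(score, reason)          # -> 85 content matches the query
```
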
@@ -282,6 +266,7 @@ def batch_call_llm_for_extraction(evaluation_results: list, query_word: str) ->
            "content": parsing_data
        })

+    logger.info(f"Batch extraction contents: {extraction_contents}")
    try:
        # Batch-call Gemini for extraction
        results = gemini_processor.batch_process(extraction_contents, EXTRACTION_PROMPT)
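
`gemini_processor` is project-internal; for exercising these code paths locally one might stub it. The contract assumed below, a list of dicts plus a prompt string in and one text reply per item out, is inferred from the calls in this diff and is not a documented API:

```python
from typing import Dict, List

class StubGeminiProcessor:
    """Hypothetical stand-in for the project's gemini_processor, for local testing only."""

    def batch_process(self, contents: List[Dict], prompt: str) -> List[str]:
        # Return one canned JSON string per input item.
        return ['{"score": 90, "reason": "stub reply"}' for _ in contents]

gemini_processor = StubGeminiProcessor()
replies = gemini_processor.batch_process([{"content": "parsed text"}], "EXTRACTION_PROMPT placeholder")
print(replies)
```
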
@@ -290,11 +275,7 @@ def batch_call_llm_for_extraction(evaluation_results: list, query_word: str) ->
        extraction_results = []
        for i, result in enumerate(results):
            result = re.sub(r'^\s*```json|\s*```\s*$', '', result, flags=re.MULTILINE).strip()
-            # Ensure the result contains the required fields
-            if not isinstance(result, dict):
-                result = {"extracted_data": str(result)}
-
-            extraction_results.append(json.dumps(result, ensure_ascii=False))
+            extraction_results.append(result)

        return extraction_results
