from langchain.tools import tool
from sqlalchemy.orm import Session
import logging
from datetime import datetime
import json
import os
import sys

# Add the project root to the module search path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from database.db import SessionLocal
from database.models import KnowledgeParsingContent, KnowledgeExtractionContent
from gemini import GeminiProcessor

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configuration constants
BATCH_SIZE = 10       # rows processed per batch
SCORE_THRESHOLD = 70  # minimum score that triggers extraction

# Define tools
@tool
def evaluation_extraction_tool(request_id: str, query_word: str) -> str:
    """
    Knowledge evaluation and extraction tool. Continuously processes rows from
    the database, evaluating them in batches and creating
    KnowledgeExtractionContent objects. Content scoring at or above the
    threshold is extracted and the corresponding KnowledgeExtractionContent
    object is updated.

    Args:
        request_id: Request ID; if omitted, all pending rows are processed
        query_word: Query word used to evaluate and extract content

    Returns:
        str: "success" when processing completed, "no data" when there was nothing to process
    """
    try:
        db = SessionLocal()
        try:
            # Delegate to the continuous batch-processing loop
            result = execute_continuous_evaluation_extraction(request_id, db, query_word)
            return result
        finally:
            db.close()
    except Exception as e:
        logger.error(f"Error during evaluation/extraction: {e}")
        return f"no data - error: {e}"
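
# A minimal invocation sketch (the argument values are hypothetical; the @tool
# decorator wraps the function in a LangChain tool, so it is called via .invoke):
#
#     result = evaluation_extraction_tool.invoke(
#         {"request_id": "req-001", "query_word": "graph databases"}
#     )
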

def execute_continuous_evaluation_extraction(request_id: str, db: Session, query_word: str) -> str:
    """Run the evaluation loop until no pending rows remain in the database."""
    total_processed = 0

    while True:
        # Fetch the next batch of rows awaiting evaluation
        contents = get_batch_contents_for_evaluation(request_id, db, BATCH_SIZE)

        if not contents:
            if total_processed > 0:
                logger.info(f"Processing finished, {total_processed} rows handled in total")
                return "success"
            return "no data"

        # Evaluate the batch and create KnowledgeExtractionContent objects
        evaluation_results = batch_evaluate_content(contents, db, request_id, query_word)

        # Extract content whose score meets the threshold
        high_score_results = [result for result in evaluation_results if result["score"] >= SCORE_THRESHOLD]
        if high_score_results:
            logger.info(f"Found {len(high_score_results)} high-scoring rows, extracting")
            batch_extract_and_save_content(high_score_results, db, request_id, query_word)

        total_processed += len(contents)
        db.commit()
    # Unreachable: the loop always returns once the batch query comes back empty

def get_batch_contents_for_evaluation(request_id: str, db: Session, batch_size: int) -> list:
    """Fetch a batch of rows that are ready for evaluation."""
    query = db.query(KnowledgeParsingContent).filter(
        KnowledgeParsingContent.status == 2  # rows whose parsing has completed
    )

    # If a request_id is given, restrict processing to that request
    if request_id:
        query = query.filter(KnowledgeParsingContent.request_id == request_id)

    return query.limit(batch_size).all()

def batch_evaluate_content(contents: list, db: Session, request_id: str, query_word: str) -> list:
    """Evaluate a batch of rows and create a KnowledgeExtractionContent object for each."""
    if not contents:
        return []

    try:
        # Evaluate the whole batch with a single LLM call
        evaluation_results_raw = batch_call_llm_for_evaluation(contents, query_word)

        # Build the result list and the corresponding ORM objects
        evaluation_results = []

        for parsing_id, score, reason, parsing_data in evaluation_results_raw:
            # Create a KnowledgeExtractionContent row for this evaluation
            extraction_content = KnowledgeExtractionContent(
                request_id=request_id,
                parsing_id=parsing_id,
                score=score,
                reason=reason,
                create_at=datetime.now()
            )
            db.add(extraction_content)

            evaluation_results.append({
                "parsing_id": parsing_id,
                "score": score,
                "reason": reason,
                "parsing_data": parsing_data,
                "extraction_content": extraction_content
            })

        # Mark the source rows as evaluated so the polling loop makes progress;
        # without this, the status == 2 filter above would return the same rows
        # forever. (Status code 4 for "evaluated" is an assumption.)
        for content in contents:
            content.status = 4

        return evaluation_results

    except Exception as e:
        logger.error(f"Error while batch-evaluating content: {e}")
        # Mark every row in the batch as failed
        for content in contents:
            content.status = 3  # processing failed
        return []

def batch_extract_and_save_content(evaluation_results: list, db: Session, request_id: str, query_word: str) -> list:
    """Extract knowledge from high-scoring content and store it on the existing rows."""
    if not evaluation_results:
        return []

    # Extract the whole batch with a single LLM call
    extraction_data_list = batch_call_llm_for_extraction(evaluation_results, query_word)

    # Persist the extraction results
    success_ids = []
    failed_ids = []

    for i, extraction_data in enumerate(extraction_data_list):
        try:
            evaluation_result = evaluation_results[i]
            extraction_content = evaluation_result["extraction_content"]

            # Store the extracted data on the existing object and mark it done
            extraction_content.data = extraction_data
            extraction_content.status = 2  # processing complete
            success_ids.append(evaluation_result["parsing_id"])
        except Exception as e:
            logger.error(f"Error while handling extraction result {i}: {e}")
            failed_ids.append(evaluation_results[i].get("parsing_id"))

    # Mark any rows that failed extraction
    if failed_ids:
        logger.warning(f"{len(failed_ids)} rows failed extraction")
        for result in evaluation_results:
            if result.get("parsing_id") in failed_ids and "extraction_content" in result:
                result["extraction_content"].status = 3  # processing failed

    return success_ids

# Read a prompt template from disk
def read_prompt_file(file_path):
    """Read a prompt template from a file."""
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            return file.read()
    except Exception as e:
        logger.error(f"Failed to read prompt file {file_path}: {e}")
        return ""


# Initialise the Gemini processor and the prompt templates
gemini_processor = GeminiProcessor()

# Load the evaluation and extraction prompts
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
evaluation_prompt_path = os.path.join(project_root, 'prompt', 'evaluation.md')
extraction_prompt_path = os.path.join(project_root, 'prompt', 'extraction.md')

# Log the resolved paths for debugging
logger.info(f"Evaluation prompt path: {evaluation_prompt_path}")
logger.info(f"Extraction prompt path: {extraction_prompt_path}")

EVALUATION_PROMPT = read_prompt_file(evaluation_prompt_path)
EXTRACTION_PROMPT = read_prompt_file(extraction_prompt_path)
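
# The evaluation prompt is expected to elicit a JSON object per item in roughly
# the shape below (inferred from the result handling in
# batch_call_llm_for_evaluation; the example values are hypothetical):
#
#     {"score": 85, "reason": "Directly answers the query with cited evidence"}
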

def batch_call_llm_for_evaluation(contents: list, query_word: str) -> list:
    """Evaluate a whole batch of content with a single LLM call."""
    # Prepare the batch payload
    evaluation_contents = []
    for content in contents:
        evaluation_contents.append({
            "query_word": query_word,
            "content": content.parsing_data
        })

    try:
        # Batch call to Gemini for evaluation
        results = gemini_processor.batch_process(evaluation_contents, EVALUATION_PROMPT)

        # Normalise the returned results
        evaluation_results = []
        for i, result in enumerate(results):
            parsing_id = contents[i].id
            parsing_data = contents[i].parsing_data

            if isinstance(result, dict) and "score" in result:
                # Well-formed result
                score = result.get("score", -2)
                reason = result.get("reason", "")
            else:
                # Malformed result: use the failure sentinel
                score = -2
                reason = "evaluation failed"

            evaluation_results.append((parsing_id, score, reason, parsing_data))

        return evaluation_results

    except Exception as e:
        logger.error(f"Batch evaluation failed: {e}")
        # Fall back to one sentinel result per row
        return [(content.id, -2, "evaluation error", content.parsing_data or "") for content in contents]

def batch_call_llm_for_extraction(evaluation_results: list, query_word: str) -> list:
    """Extract knowledge from a batch of high-scoring content with a single LLM call."""
    # Prepare the batch payload
    extraction_contents = []
    for result in evaluation_results:
        parsing_data = result.get("parsing_data", "")
        extraction_contents.append({
            "query_word": query_word,
            "content": parsing_data
        })

    try:
        # Batch call to Gemini for extraction
        results = gemini_processor.batch_process(extraction_contents, EXTRACTION_PROMPT)

        # Serialise each result to a JSON string for storage
        extraction_results = []
        for result in results:
            # Coerce non-dict results into a dict before serialising
            if not isinstance(result, dict):
                result = {"extracted_data": str(result)}
            extraction_results.append(json.dumps(result, ensure_ascii=False))

        return extraction_results

    except Exception as e:
        logger.error(f"Batch extraction failed: {e}")
        # Return empty JSON objects so callers can still index by position
        return ["{}"] * len(evaluation_results)
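
# A minimal smoke-test entry point. This is a sketch: the query word is a
# placeholder, and it assumes the database and Gemini credentials are configured.
if __name__ == "__main__":
    print(evaluation_extraction_tool.invoke(
        {"request_id": "", "query_word": "knowledge graph"}  # "" = process all pending rows
    ))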