from langchain_core.tools import tool
from sqlalchemy.orm import Session
import logging
from datetime import datetime
import json
import os
import sys
import re

# Add the project root to the import path.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from database.db import SessionLocal
from database.models import KnowledgeParsingContent, KnowledgeExtractionContent
from gemini import GeminiProcessor

# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Tuning constants
BATCH_SIZE = 5  # rows fetched per batch
SCORE_THRESHOLD = 70  # minimum score for extraction

# Legacy manual tool registration, kept for reference:
# evaluation_extraction_tool = Tool(
#     func=lambda request_id, query_word: _evaluation_extraction_tool(request_id, query_word),
#     name="evaluation_extraction_tool",
#     description="Knowledge evaluation and extraction tool: evaluates parsed rows in the database and extracts their content"
# )


@tool
def evaluation_extraction_tool(request_id: str, query_word: str) -> str:
    """
    Knowledge evaluation and extraction tool. Continuously processes parsed rows
    from the database in batches, scoring each one and creating a
    KnowledgeExtractionContent record for it. Rows scoring at or above
    SCORE_THRESHOLD (70) are additionally extracted, and their
    KnowledgeExtractionContent records are updated with the result.

    Args:
        request_id: Request ID used to select the rows to process.
        query_word: Query word the content is evaluated and extracted against.

    Returns:
        str: "success" when processing finished, "no data" when there was
        nothing to process.
    """
    # The context manager scopes the database session to this call.
    with SessionLocal() as db:
        try:
            result = execute_continuous_evaluation_extraction(request_id, db, query_word)
            return result
        except Exception as e:
            # Roll back the transaction on any unhandled error.
            db.rollback()
            logger.error(f"Error during evaluation/extraction: {e}")
            return f"no data - error: {e}"


def execute_continuous_evaluation_extraction(request_id: str, db: Session, query_word: str) -> str:
    """Run the evaluation loop until the database has no more pending rows."""
    logger.info(f"Starting, request_id: {request_id}, query_word: {query_word}")

    total_processed = 0
    offset = 0

    try:
        while True:
            # Fetch the next batch of rows awaiting evaluation.
            contents = get_batch_contents_for_evaluation(request_id, db, BATCH_SIZE, offset)

            logger.info(f"Fetched {len(contents)} rows awaiting evaluation")
            if not contents:
                if total_processed > 0:
                    logger.info(f"Done, processed {total_processed} rows in total")
                    db.commit()  # make sure the last batch is committed
                    return "success"
                return "no data"

            try:
                # Evaluate the batch and stage KnowledgeExtractionContent rows.
                evaluation_results = batch_evaluate_content(contents, db, request_id, query_word)

                logger.debug(f"evaluation_results: {evaluation_results}")

                # Extract the rows whose score meets the threshold.
                high_score_results = [result for result in evaluation_results if result["score"] >= SCORE_THRESHOLD]
                if high_score_results:
                    logger.info(f"Found {len(high_score_results)} high-scoring rows, extracting")
                    batch_extract_and_save_content(high_score_results, db, request_id, query_word)

                total_processed += len(contents)
                db.commit()  # commit after every batch
                # The offset is deliberately NOT advanced here: committed rows now
                # have extraction records and drop out of the `parsing_id IS NULL`
                # filter, so the next query already starts at the first pending row.
            except Exception as e:
                # Roll back the failed batch and skip past it so the loop
                # cannot spin on the same rows forever.
                db.rollback()
                logger.error(f"Error while processing batch: {e}")
                offset += len(contents)
    except Exception as e:
        # On a fatal error, roll back and re-raise.
        db.rollback()
        logger.error(f"Error in the evaluation/extraction loop: {e}")
        raise


def get_batch_contents_for_evaluation(request_id: str, db: Session, batch_size: int, offset: int = 0) -> list:
    """Fetch one batch of parsed rows that have no extraction record yet."""
    query = db.query(KnowledgeParsingContent).outerjoin(
        KnowledgeExtractionContent,
        KnowledgeParsingContent.id == KnowledgeExtractionContent.parsing_id
    ).filter(
        KnowledgeParsingContent.status == 5,  # parsing finished
        KnowledgeParsingContent.request_id == request_id,
        KnowledgeExtractionContent.parsing_id.is_(None)  # anti-join: not yet evaluated
    )

    return query.offset(offset).limit(batch_size).all()
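
# For reference, the query above compiles to roughly the following SQL. This is
# a sketch only: the actual table and column names depend on the model
# definitions in database.models.
#
#   SELECT kpc.*
#   FROM knowledge_parsing_content AS kpc
#   LEFT OUTER JOIN knowledge_extraction_content AS kec
#          ON kpc.id = kec.parsing_id
#   WHERE kpc.status = 5
#     AND kpc.request_id = :request_id
#     AND kec.parsing_id IS NULL
#   LIMIT :batch_size OFFSET :offset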

def batch_evaluate_content(contents: list, db: Session, request_id: str, query_word: str) -> list:
    """Score a batch of parsed rows with the LLM and stage one
    KnowledgeExtractionContent row per input."""
    if not contents:
        return []

    try:
        # Score the whole batch with one LLM call.
        evaluation_results_raw = batch_call_llm_for_evaluation(contents, query_word)

        evaluation_results = []
        for parsing_id, score, score_reason, parsing_data in evaluation_results_raw:
            # Stage a KnowledgeExtractionContent row holding the score.
            extraction_content = KnowledgeExtractionContent(
                request_id=request_id,
                parsing_id=parsing_id,
                score=score,
                score_reason=score_reason,
                create_at=datetime.now()
            )
            db.add(extraction_content)

            evaluation_results.append({
                "parsing_id": parsing_id,
                "score": score,
                "score_reason": score_reason,
                "parsing_data": parsing_data,
                "extraction_content": extraction_content
            })

        return evaluation_results

    except Exception as e:
        logger.error(f"Error during batch evaluation: {e}")
        # Mark every row in the batch as failed.
        for content in contents:
            content.status = 3  # failed
        return []
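
# Status codes, as inferred from their usage in this module (assumed to be
# defined alongside the models in database.models):
#   KnowledgeParsingContent.status:    5 = parsing finished (eligible here), 3 = failed
#   KnowledgeExtractionContent.status: 2 = extraction finished, 3 = failed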
logger.error(f"处理抽取结果 {i} 时出错: {e}")                if i < len(evaluation_results):                    failed_ids.append(evaluation_results[i].get("parsing_id"))                # 如果有失败的内容,将其标记为处理失败        if failed_ids:            logger.warning(f"有 {len(failed_ids)} 条内容抽取失败")            for result in evaluation_results:                if result.get("parsing_id") in failed_ids and "extraction_content" in result:                    result["extraction_content"].status = 3  # 处理失败                return success_ids    except Exception as e:        logger.error(f"批量抽取和保存内容时出错: {e}")        db.rollback()  # 确保发生异常时回滚事务        return []# 读取提示词文件def read_prompt_file(file_path):    """从文件中读取提示词"""    try:        with open(file_path, 'r', encoding='utf-8') as file:            return file.read()    except Exception as e:        logger.error(f"读取提示词文件 {file_path} 失败: {str(e)}")        return ""# 初始化 Gemini 处理器和提示词gemini_processor = GeminiProcessor()# 加载评估和抽取提示词project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))evaluation_prompt_path = os.path.join(project_root, 'prompt', 'evaluation.md')extraction_prompt_path = os.path.join(project_root, 'prompt', 'extraction.md')# 打印路径信息,用于调试EVALUATION_PROMPT = read_prompt_file(evaluation_prompt_path)EXTRACTION_PROMPT = read_prompt_file(extraction_prompt_path)def batch_call_llm_for_evaluation(contents: list, query_word: str) -> list:    """批量调用大模型进行内容评估    """    # 准备批量评估内容    evaluation_contents = []    for content in contents:        evaluation_contents.append({            "query_word": query_word,            "content": content.parsing_data        })        try:        # 批量调用 Gemini 进行评估        results = gemini_processor.batch_process(evaluation_contents, EVALUATION_PROMPT)                # 处理返回结果        evaluation_results = []        for i, result in enumerate(results):            result = re.sub(r'^\s*```json|\s*```\s*$', '', result, flags=re.MULTILINE).strip()            result = json.loads(result)            parsing_id = contents[i].id            parsing_data = contents[i].parsing_data               score = result.get("score", -2)            score_reason = result.get("reason", "")                        evaluation_results.append((parsing_id, score, score_reason, parsing_data))                return evaluation_results            except Exception as e:        logger.error(f"批量评估过程异常: {str(e)}")        # 返回默认结果        return [(content.id, 0, "评估过程异常", content.data if hasattr(content, 'data') else (content.parsing_data or "")) for content in contents]def batch_call_llm_for_extraction(evaluation_results: list, query_word: str) -> list:    # 准备批量抽取内容    extraction_contents = []    for result in evaluation_results:        parsing_data = result.get("parsing_data", "")        extraction_contents.append({            "query_word": query_word,            "content": parsing_data        })    try:        # 批量调用 Gemini 进行抽取        results = gemini_processor.batch_process(extraction_contents, EXTRACTION_PROMPT)                # 处理返回结果        extraction_results = []        for i, result in enumerate(results):            result = re.sub(r'^\s*```json|\s*```\s*$', '', result, flags=re.MULTILINE).strip()            result = json.loads(result)            extracted_data = result.get("extracted_content", "未提取到内容")            clean_reason = result.get("analysis_reason", "未返回原因")                        extraction_results.append((extracted_data, clean_reason))                return extraction_results            except Exception as e:        
logger.error(f"批量抽取过程异常: {str(e)}")        # 返回空结果        return ["{}"] * len(evaluation_results)