|
|
@@ -200,16 +200,19 @@ def render_grounding_prompt(
|
|
|
task: str,
|
|
|
draft: Dict,
|
|
|
compact_tree: str,
|
|
|
+ reference_paths: Optional[List[str]] = None,
|
|
|
) -> str:
|
|
|
"""渲染 Stage 2 prompt"""
|
|
|
if task == "capability":
|
|
|
target = "capabilities 数组中的每一条 capability"
|
|
|
else:
|
|
|
target = "strategy;如果 strategy 为 null,则原样返回"
|
|
|
+ paths_str = json.dumps(reference_paths or [], ensure_ascii=False)
|
|
|
return (
|
|
|
template
|
|
|
.replace("{target}", target)
|
|
|
.replace("{compact_tree}", compact_tree)
|
|
|
+ .replace("{reference_paths}", paths_str)
|
|
|
.replace("{draft_json}", json.dumps(draft, ensure_ascii=False, indent=2))
|
|
|
)
|
|
|
|
|
|
@@ -266,14 +269,19 @@ async def ground_single_case(
|
|
|
if all_keywords:
|
|
|
categories = await search_categories_by_keywords(all_keywords, top_k=5)
|
|
|
workflow_compact_tree = build_compact_tree(categories)
|
|
|
+ workflow_ref_paths = list(dict.fromkeys(
|
|
|
+ c["path"] for c in categories if c.get("path")
|
|
|
+ ))
|
|
|
else:
|
|
|
workflow_compact_tree = compact_tree or "[]"
|
|
|
+ workflow_ref_paths = []
|
|
|
else:
|
|
|
workflow_compact_tree = compact_tree or "[]"
|
|
|
+ workflow_ref_paths = []
|
|
|
|
|
|
# 整个 workflow 传给 LLM(保持上下文)
|
|
|
draft = {"strategy": workflow}
|
|
|
- prompt = render_grounding_prompt(template, "strategy", draft, workflow_compact_tree)
|
|
|
+ prompt = render_grounding_prompt(template, "strategy", draft, workflow_compact_tree, workflow_ref_paths)
|
|
|
messages = [{"role": "user", "content": prompt}]
|
|
|
|
|
|
grounded, cost = await call_llm_with_retry(
|
|
|
@@ -336,14 +344,19 @@ async def ground_single_case(
|
|
|
if all_keywords:
|
|
|
categories = await search_categories_by_keywords(all_keywords, top_k=5)
|
|
|
cap_compact_tree = build_compact_tree(categories)
|
|
|
+ cap_ref_paths = list(dict.fromkeys(
|
|
|
+ c["path"] for c in categories if c.get("path")
|
|
|
+ ))
|
|
|
else:
|
|
|
cap_compact_tree = compact_tree or "[]"
|
|
|
+ cap_ref_paths = []
|
|
|
else:
|
|
|
cap_compact_tree = compact_tree or "[]"
|
|
|
+ cap_ref_paths = []
|
|
|
|
|
|
# 整个 capabilities 传给 LLM(保持上下文)
|
|
|
draft = {"capabilities": capabilities}
|
|
|
- prompt = render_grounding_prompt(template, "capability", draft, cap_compact_tree)
|
|
|
+ prompt = render_grounding_prompt(template, "capability", draft, cap_compact_tree, cap_ref_paths)
|
|
|
messages = [{"role": "user", "content": prompt}]
|
|
|
|
|
|
grounded, cost = await call_llm_with_retry(
|
|
|
@@ -400,7 +413,7 @@ async def apply_grounding(
|
|
|
cases = case_data.get("cases", [])
|
|
|
|
|
|
# 检查是否使用 API 动态搜索模式
|
|
|
- use_api = os.getenv("USE_SEARCH_API", "false").lower() == "true"
|
|
|
+ use_api = os.getenv("USE_SEARCH_API", "true").lower() == "true"
|
|
|
|
|
|
# 如果不使用 API,预加载完整内容树
|
|
|
compact_tree = None
|