  1. """
  2. 逐 case 提取 workflow + fragments (v6版本)
  3. 从 case.json 读取,按 index 遍历每个 case,
  4. 调用 LLM 同时提取 workflow(薄壳 steps)和 fragments(原子操作,含完整 capability 字段),
  5. 按 index 原位回填到 case.json
  6. v6 架构特性:
  7. - workflow.steps 是薄壳:step_id / order / phase / relation / body,不含 capability 字段
  8. - fragments 是原子操作列表:每个 fragment 含完整 capability 字段 + workflow_step_ref + is_alternative_to
  9. - 步内多原子操作 + 步内 alternative 都在 fragment 层表达
  10. - standalone fragment(workflow_step_ref=null)用于无 workflow 上下文的能力提及
  11. """
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, Optional, List

from examples.process_pipeline.script.llm_helper import call_llm_with_retry

# Path to the v5 vocabulary file
SCRIPT_DIR = Path(__file__).resolve().parent
METHOD_VOCAB_PATH = SCRIPT_DIR / "resource" / "method_vocab_v5.json"

# Default vocabulary, used when the file does not exist
DEFAULT_METHOD_VOCAB = {
    "流程角色": [
        "生成指令", "编辑指令", "约束条件", "参考素材", "控制信号",
        "区域控制", "参数配置", "模型资源", "源素材", "中间产物",
        "成品", "模板", "评估结果"
    ],
    "模态": ["文本", "图片", "视频", "音频", "特征点", "参数", "模型", "向量", "表格"],
    "主动作": [
        "生成", "编辑", "提取", "改写", "合成", "修复", "增强",
        "训练", "评估", "剪辑", "模板化", "排版", "转写", "配音",
        "匹配", "扩展", "导出"
    ],
    "动作方式": [
        "直接生成", "一致性保持", "结构约束", "质量收束", "局部重绘",
        "扩图", "换背景", "提示词反推", "模板化", "多图融合", "清晰化",
        "风格迁移", "常规编辑", "变体生成", "动画化", "镜头延展",
        "换主体", "换装", "擦除", "调色", "前后景融合", "图文合成",
        "音画合成", "分层叠加", "特征提取", "蒙版提取", "关键帧提取",
        "字幕提取", "风格提取", "片段拼接", "节奏压缩", "转场编排",
        "字幕对齐", "音画同步", "降噪", "补帧", "超分", "稳定化",
        "质感增强", "结构抽象", "变量抽象", "版式套用", "格式转换", "压缩导出"
    ],
}


def load_method_vocab() -> Dict[str, list]:
    """Load the structured vocabulary (v5) from its JSON file, falling back to the default."""
    if METHOD_VOCAB_PATH.exists():
        try:
            with open(METHOD_VOCAB_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            print(f"Warning: Failed to load method_vocab.json: {e}, using default")
    return DEFAULT_METHOD_VOCAB


def load_prompt_template(prompt_name: str) -> str:
    """Load a .prompt template and strip its front matter and role markers."""
    base_dir = Path(__file__).parent.parent
    prompt_path = base_dir / "prompts" / f"{prompt_name}.prompt"
    with open(prompt_path, "r", encoding="utf-8") as f:
        content = f.read()
    # Drop the "---"-delimited front-matter block, if any
    if content.startswith("---"):
        parts = content.split("---", 2)
        if len(parts) >= 3:
            content = parts[2]
    content = content.replace("$system$", "").replace("$user$", "")
    return content.strip()
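
# Assumed layout of prompts/extract_workflow.prompt, inferred from the parsing
# above (illustrative only; the real template may differ):
#
#   ---
#   <front matter / metadata>       <- everything up to the second "---" is dropped
#   ---
#   $system$                        <- role markers are stripped verbatim
#   ...prompt body, optionally containing %context% and {interface_vocab}...
#   $user$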


def render_method_vocab_block(vocab: Dict[str, list]) -> str:
    """Render the structured interface vocabulary (v5) as a prompt section."""
    lines = [
        "\n# 结构化接口词库(v5,必须遵守)",
        "只输出结构化 inputs / outputs / action。",
        "- `role/流程角色` 只写接口职责,不写具体内容 what。",
        "- `modality/模态` 只写媒介或数据形态;统一用 `图片`,不要写 `图像`;统一用 `文本`,不要写 `文字`。",
        "- `artifact_type/工件类型` 写该模态下的具体工件,如 `正向提示词`、`蒙版`。",
        "- `action.main_action` 写主动作;`action.mechanism` 写动作内部机制。",
        "- 只有词库确实不够时才新增术语;新增术语也必须抽象、短、可复用。",
        "",
        "当前词库:",
    ]
    for key, values in vocab.items():
        lines.append(f"- {key}:{'、'.join(values)}")
    return "\n".join(lines)


async def extract_workflow_from_case(
    case_item: Dict[str, Any],
    llm_call: Any,
    model: str = "anthropic/claude-sonnet-4-5"
) -> tuple[Optional[Dict[str, Any]], Optional[List[Dict[str, Any]]], float]:
    """
    Extract the workflow (thin-shell steps) and the fragments (atomic operations)
    from a single case item in one LLM call.

    Returns:
        (workflow_dict, fragments_list, cost)
        workflow_dict is None if the case was skipped or extraction failed
        fragments_list is None if the case was skipped or extraction failed
    """
    images = case_item.get("images", [])
    # Build the prompt context from the case itself, excluding the raw source
    # record and any previously extracted fields.
    case_copy = dict(case_item)
    case_copy.pop("images", None)
    case_copy.pop("_raw", None)
    case_copy.pop("workflow", None)
    case_copy.pop("fragments", None)
    case_copy.pop("capabilities", None)
    if not case_copy and not images:
        return None, None, 0.0

    title = case_item.get("title", "")[:20] or "untitled"
    context = json.dumps(case_copy, ensure_ascii=False, indent=2)
    try:
        prompt_template = load_prompt_template("extract_workflow")
        method_vocab = load_method_vocab()
        vocab_block = render_method_vocab_block(method_vocab)
        # Fill the %context% placeholder if the template defines one,
        # otherwise append the post content at the end.
        if "%context%" in prompt_template:
            prompt = prompt_template.replace("%context%", context)
        else:
            prompt = prompt_template + f"\n\n## 帖子内容\n{context}"
        # Likewise for the vocabulary block.
        if "{interface_vocab}" in prompt:
            prompt = prompt.replace("{interface_vocab}", vocab_block)
        elif vocab_block not in prompt:
            prompt = prompt + "\n" + vocab_block
    except Exception as e:
        print(f"Warning: Failed to load prompt template: {e}, using fallback")
        method_vocab = load_method_vocab()
        vocab_block = render_method_vocab_block(method_vocab)
  119. prompt = f"""将以下帖子内容总结为AI图片生成的工序和原子操作,以JSON格式输出。
  120. # 输出格式(v6)
  121. {{
  122. "skip": false,
  123. "skip_reason": "",
  124. "workflow": {{
  125. "workflow_id": null,
  126. "steps": [
  127. {{
  128. "step_id": "s1",
  129. "order": 1,
  130. "phase": "生成",
  131. "relation": "[去向.最终成品]",
  132. "body": "string | null"
  133. }}
  134. ]
  135. }},
  136. "fragments": [
  137. {{
  138. "fragment_id": "f_s1_0",
  139. "action": {{"main_action": "生成", "mechanism": "直接生成"}},
  140. "inputs": [{{"modality": "文本", "description": "...", "relation": "[来源.原始输入]"}}],
  141. "outputs": [{{"modality": "图片", "description": "...", "relation": "[去向.最终成品]"}}],
  142. "body": "string | null",
  143. "effects": [
  144. {{
  145. "statement": "实现XXX",
  146. "criteria": "判断标准",
  147. "judge_method": "vlm",
  148. "negative_examples": []
  149. }}
  150. ],
  151. "control_target": [],
  152. "artifact_type": null,
  153. "tools": [],
  154. "apply_to_draft": {{"实质": ["..."], "形式": ["..."]}},
  155. "workflow_step_ref": {{"workflow_id": null, "step_id": "s1"}},
  156. "is_alternative_to": []
  157. }}
  158. ]
  159. }}
  160. {vocab_block}
  161. ## 帖子内容
  162. {context}
  163. 请严格按照上述格式输出JSON,不要包含其他内容。"""
    # Attach up to 9 http(s) image URLs as multimodal content parts.
    if images:
        image_urls = [img for img in images[:9] if isinstance(img, str) and img.startswith("http")]
        if image_urls:
            content_array = [{"type": "text", "text": prompt}]
            for url in image_urls:
                content_array.append({"type": "image_url", "image_url": {"url": url}})
            messages = [{"role": "user", "content": content_array}]
        else:
            messages = [{"role": "user", "content": prompt}]
    else:
        messages = [{"role": "user", "content": prompt}]
    result_data, cost = await call_llm_with_retry(
        llm_call=llm_call,
        messages=messages,
        model=model,
        temperature=0.1,
        max_tokens=10000,
        max_retries=3,
        schema_name="extract_workflow",
        task_name=f"Workflow_{title}",
    )
    if not result_data:
        return None, None, cost
    # skip=true means the model judged the post has no extractable workflow.
    if result_data.get("skip"):
        return None, None, cost

    workflow_data = result_data.get("workflow")
    fragments_data = result_data.get("fragments", [])
    return workflow_data, fragments_data, cost


async def extract_workflow(
    case_file: Path,
    llm_call: Any,
    model: str = "anthropic/claude-sonnet-4-5",
    max_concurrent: int = 3,
    case_indices: Optional[List[int]] = None
) -> Dict[str, Any]:
    """
    Iterate over case.json by index and extract workflows.

    Args:
        case_file: path to case.json
        llm_call: LLM call function
        model: model to use
        max_concurrent: maximum number of concurrent LLM calls
        case_indices: optional list of case indices to process; if None, all cases are processed
    """
    with open(case_file, "r", encoding="utf-8") as f:
        case_data = json.load(f)
    cases = case_data.get("cases", [])

    # If case_indices is given, process only those cases
    if case_indices is not None:
        cases_to_process = [c for c in cases if c.get("index") in case_indices]
        print(f"Extracting workflow from {len(cases_to_process)} cases (filtered by indices: {case_indices})...")
    else:
        cases_to_process = cases
        print(f"Extracting workflow from {len(cases)} cases...")

    # Bound concurrency so at most max_concurrent cases are in flight at once.
    semaphore = asyncio.Semaphore(max_concurrent)

    async def process_with_semaphore(case_item):
        async with semaphore:
            index = case_item.get("index", 0)
            raw = case_item.get("_raw", {})
            case_id = raw.get("case_id", "unknown")
            title = case_item.get("title", "")
            print(f" -> [{index}] [{case_id}] extracting workflow: {title[:60]}")
            workflow, fragments, cost = await extract_workflow_from_case(case_item, llm_call, model)
            frag_count = len(fragments) if fragments else 0
            status = f"ok ({frag_count} fragments)" if workflow else "null"
            print(f" <- [{index}] [{case_id}] workflow {status}")
            result = dict(case_item)
            result["workflow"] = workflow
            result["fragments"] = fragments if fragments is not None else []
            return result, cost
    tasks = [process_with_semaphore(case) for case in cases_to_process]
    results_with_costs = await asyncio.gather(*tasks)
    results = [r[0] for r in results_with_costs]
    costs = [r[1] for r in results_with_costs]
    total_cost = sum(costs)
    success_count = sum(1 for r in results if r.get("workflow") and r.get("fragments"))
    failed_count = len(results) - success_count

    # For a partial run, merge the processed cases back into the full list
    if case_indices is not None:
        # Map index -> processed result
        result_map = {r.get("index"): r for r in results}
        # Replace the matching entries in the original cases list
        for i, case in enumerate(cases):
            if case.get("index") in result_map:
                cases[i] = result_map[case.get("index")]
        results = cases

    results.sort(key=lambda x: x.get("index", 0))
    case_data["cases"] = results
    case_file.parent.mkdir(parents=True, exist_ok=True)
    with open(case_file, "w", encoding="utf-8") as f:
        json.dump(case_data, f, ensure_ascii=False, indent=2)

    fragments_count = sum(len(r.get("fragments") or []) for r in results)
    return {
        "total": len(results),
        "success": success_count,
        "failed": failed_count,
        "fragments_total": fragments_count,
        "total_cost": total_cost,
        "output_file": str(case_file),
    }


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: python extract_workflow.py <output_dir>")
        sys.exit(1)
    output_dir = Path(sys.argv[1])
    case_file = output_dir / "case.json"
    if not case_file.exists():
        print(f"Error: {case_file} not found")
        sys.exit(1)
    print("Please use this module through run_pipeline.py")