{
  "1": {
    "inputs": {
      "vae_name": "ae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "2": {
    "inputs": {
      "conditioning": [
        "18",
        0
      ]
    },
    "class_type": "ConditioningZeroOut",
    "_meta": {
      "title": "ConditioningZeroOut"
    }
  },
  "3": {
    "inputs": {
      "guidance": 2.5,
      "conditioning": [
        "6",
        0
      ]
    },
    "class_type": "FluxGuidance",
    "_meta": {
      "title": "FluxGuidance"
    }
  },
  "6": {
    "inputs": {
      "conditioning": [
        "18",
        0
      ],
      "latent": [
        "15",
        0
      ]
    },
    "class_type": "ReferenceLatent",
    "_meta": {
      "title": "ReferenceLatent"
    }
  },
  "9": {
    "inputs": {
      "seed": 219,
      "steps": 20,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "denoise": 1,
      "model": [
        "25",
        0
      ],
      "positive": [
        "3",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "15",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "12": {
    "inputs": {
      "unet_name": "flux1-dev-kontext_fp8_scaled.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "13": {
    "inputs": {
      "clip_name1": "clip_l.safetensors",
      "clip_name2": "t5xxl_fp8_e4m3fn_scaled.safetensors",
      "type": "flux",
      "device": "default"
    },
    "class_type": "DualCLIPLoader",
    "_meta": {
      "title": "DualCLIPLoader"
    }
  },
  "14": {
    "inputs": {
      "direction": "right",
      "match_image_size": true,
      "image1": [
        "29",
        0
      ],
      "image2": [
        "30",
        0
      ]
    },
    "class_type": "ImageConcanate",
    "_meta": {
      "title": "Image Concatenate"
    }
  },
  "15": {
    "inputs": {
      "pixels": [
        "20",
        0
      ],
      "vae": [
        "1",
        0
      ]
    },
    "class_type": "VAEEncode",
    "_meta": {
      "title": "VAE Encode"
    }
  },
  "16": {
    "inputs": {
      "images": [
        "20",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "17": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "19",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "18": {
    "inputs": {
      "text": "refcontrolpose change pose to photo with reference from left side",
      "clip": [
        "25",
        1
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Positive Prompt)"
    }
  },
  "19": {
    "inputs": {
      "samples": [
        "9",
        0
      ],
      "vae": [
        "1",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "20": {
    "inputs": {
      "image": [
        "14",
        0
      ]
    },
    "class_type": "FluxKontextImageScale",
    "_meta": {
      "title": "FluxKontextImageScale"
    }
  },
  "21": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "23",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "22": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "20",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "23": {
    "inputs": {
      "direction": "down",
      "match_image_size": true,
      "image1": [
        "20",
        0
      ],
      "image2": [
        "19",
        0
      ]
    },
    "class_type": "ImageConcanate",
    "_meta": {
      "title": "Image Concatenate"
    }
  },
  "24": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "19",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "25": {
    "inputs": {
      "lora_name": "refcontrol_pose.safetensors",
      "strength_model": 1,
      "strength_clip": 1,
      "model": [
        "12",
        0
      ],
      "clip": [
        "13",
        0
      ]
    },
    "class_type": "LoraLoader",
    "_meta": {
      "title": "Load LoRA (Model and CLIP)"
    }
  },
  "28": {
    "inputs": {
      "image": "2.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image (Pose)"
    }
  },
  "29": {
    "inputs": {
      "image": "ref.jpg"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image (Reference)"
    }
  },
  "30": {
    "inputs": {
      "detect_hand": "enable",
      "detect_body": "enable",
      "detect_face": "enable",
      "resolution": 1024,
      "scale_stick_for_xinsr_cn": "disable",
      "image": [
        "28",
        0
      ]
    },
    "class_type": "OpenposePreprocessor",
    "_meta": {
      "title": "OpenPose Pose"
    }
  }
}