{
  "6": {
    "inputs": {
      "text": "cute anime girl with massive fluffy fennec ears and a big fluffy tail blonde messy long hair blue eyes wearing a maid outfit with a long black gold leaf pattern dress and a white apron mouth open holding a fancy black forest cake with candles on top in the kitchen of an old dark Victorian mansion lit by candlelight with a bright window to the foggy forest and very expensive stuff everywhere",
      "clip": ["11", 0]
    },
    "class_type": "CLIPTextEncode"
  },
  "8": {
    "inputs": {
      "samples": ["13", 0],
      "vae": ["10", 0]
    },
    "class_type": "VAEDecode"
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": ["8", 0]
    },
    "class_type": "SaveImage"
  },
  "10": {
    "inputs": {
      "vae_name": "ae.safetensors"
    },
    "class_type": "VAELoader"
  },
  "11": {
    "inputs": {
      "clip_name1": "t5xxl_fp16.safetensors",
      "clip_name2": "clip_l.safetensors",
      "type": "flux"
    },
    "class_type": "DualCLIPLoader"
  },
  "12": {
    "inputs": {
      "unet_name": "flux1-dev.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader"
  },
  "13": {
    "inputs": {
      "noise": ["25", 0],
      "guider": ["22", 0],
      "sampler": ["16", 0],
      "sigmas": ["17", 0],
      "latent_image": ["27", 0]
    },
    "class_type": "SamplerCustomAdvanced"
  },
  "16": {
    "inputs": {
      "sampler_name": "euler"
    },
    "class_type": "KSamplerSelect"
  },
  "17": {
    "inputs": {
      "scheduler": "simple",
      "steps": 20,
      "denoise": 1.0,
      "model": ["30", 0]
    },
    "class_type": "BasicScheduler"
  },
  "22": {
    "inputs": {
      "model": ["30", 0],
      "conditioning": ["26", 0]
    },
    "class_type": "BasicGuider"
  },
  "25": {
    "inputs": {
      "noise_seed": 219670278747233
    },
    "class_type": "RandomNoise"
  },
  "26": {
    "inputs": {
      "guidance": 3.5,
      "conditioning": ["6", 0]
    },
    "class_type": "FluxGuidance"
  },
  "27": {
    "inputs": {
      "width": 1024,
      "height": 1024,
      "batch_size": 1
    },
    "class_type": "EmptySD3LatentImage"
  },
  "30": {
    "inputs": {
      "max_shift": 1.15,
      "base_shift": 0.5,
      "width": 1024,
      "height": 1024,
      "model": ["12", 0]
    },
    "class_type": "ModelSamplingFlux"
  }
}