{
  "3": {
    "inputs": {
      "seed": 221267224284097,
      "steps": 30,
      "cfg": 5.0,
      "sampler_name": "euler",
      "scheduler": "simple",
      "denoise": 1.0,
      "model": ["70", 0],
      "positive": ["16", 0],
      "negative": ["40", 0],
      "latent_image": ["53", 0]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "8": {
    "inputs": {
      "samples": ["3", 0],
      "vae": ["55", 0]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": ["8", 0]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "16": {
    "inputs": {
      "text": "anime girl with massive fennec ears and a big fluffy fox tail with long wavy blonde hair and blue eyes wearing a pink sweater a large oversized black winter coat and a long blue maxi skirt and large winter boots and a red scarf and large gloves sitting in a sled sledding fast down a snow mountain",
      "clip": ["54", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "Positive Prompt"
    }
  },
  "40": {
    "inputs": {
      "text": "blurry",
      "clip": ["54", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "Negative Prompt"
    }
  },
  "53": {
    "inputs": {
      "width": 1024,
      "height": 1024,
      "batch_size": 1
    },
    "class_type": "EmptySD3LatentImage",
    "_meta": {
      "title": "EmptySD3LatentImage"
    }
  },
  "54": {
    "inputs": {
      "clip_name1": "clip_l_hidream.safetensors",
      "clip_name2": "clip_g_hidream.safetensors",
      "clip_name3": "t5xxl_fp8_e4m3fn_scaled.safetensors",
      "clip_name4": "llama_3.1_8b_instruct_fp8_scaled.safetensors"
    },
    "class_type": "QuadrupleCLIPLoader",
    "_meta": {
      "title": "QuadrupleCLIPLoader"
    }
  },
  "55": {
    "inputs": {
      "vae_name": "ae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "69": {
    "inputs": {
      "unet_name": "hidream_i1_full_fp16.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "70": {
    "inputs": {
      "shift": 3.0000000000000004,
      "model": ["69", 0]
    },
    "class_type": "ModelSamplingSD3",
    "_meta": {
      "title": "ModelSamplingSD3"
    }
  }
}