{
  "3": {
    "inputs": {
      "seed": 1110468078805530,
      "steps": 30,
      "cfg": 4.0,
      "sampler_name": "euler",
      "scheduler": "simple",
      "denoise": 1.0,
      "model": [
        "13",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "12",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "6": {
    "inputs": {
      "text": "aesthetic 11, anime rpg game style, cute anime girl with gigantic fennec ears and a big fluffy fox tail with long wavy blonde hair and large blue eyes blonde colored eyelashes wearing a pink sweater a large oversized gold trimmed black winter coat and a long blue maxi skirt and large winter boots and a red scarf and large gloves dirty clothes muddy clothes, she is happy and holding a sword in a winter forest with evergreen trees there are the beautiful snow mountains in the background",
      "clip": [
        "11",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "low quality, bad anatomy, extra digits, missing digits, extra limbs, missing limbs",
      "clip": [
        "11",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "15",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "10": {
    "inputs": {
      "clip_name": "t5xxl_fp16.safetensors",
      "type": "chroma",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "11": {
    "inputs": {
      "min_padding": 0,
      "min_length": 3,
      "clip": [
        "10",
        0
      ]
    },
    "class_type": "T5TokenizerOptions",
    "_meta": {
      "title": "T5TokenizerOptions"
    }
  },
  "12": {
    "inputs": {
      "width": 1024,
      "height": 1024,
      "batch_size": 1
    },
    "class_type": "EmptySD3LatentImage",
    "_meta": {
      "title": "EmptySD3LatentImage"
    }
  },
  "13": {
    "inputs": {
      "unet_name": "Chroma1-HD.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "15": {
    "inputs": {
      "vae_name": "ae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  }
}