embedding_example_api.json 1.4 KB

```json
{
  "3": {
    "inputs": {
      "seed": 193694018275622,
      "steps": 20,
      "cfg": 8.0,
      "sampler_name": "uni_pc_bh2",
      "scheduler": "normal",
      "denoise": 1.0,
      "model": [
        "4",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "5",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "4": {
    "inputs": {
      "ckpt_name": "v2-1_768-ema-pruned.ckpt"
    },
    "class_type": "CheckpointLoaderSimple"
  },
  "5": {
    "inputs": {
      "width": 768,
      "height": 768,
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage"
  },
  "6": {
    "inputs": {
      "text": "photograph in the style of embedding:SDA768.pt girl with blonde hair\nlandscape scenery view",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode"
  },
  "7": {
    "inputs": {
      "text": "bad hands",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode"
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "4",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage"
  }
}
```
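Because this is the API (prompt) format of the workflow rather than the UI format, it can be queued directly against a running ComfyUI server. The sketch below is a minimal example, assuming a local server on the default 127.0.0.1:8188 and that the file above is saved locally as embedding_example_api.json; the seed override is purely illustrative.

```python
import json
import urllib.request

# Load the API-format workflow shown above (local filename is an assumption).
with open("embedding_example_api.json", "r") as f:
    prompt = json.load(f)

# Inputs can be edited before queueing, e.g. giving the KSampler node ("3") a new seed.
prompt["3"]["inputs"]["seed"] = 5

# Queue the prompt on the ComfyUI server (default local address assumed).
data = json.dumps({"prompt": prompt}).encode("utf-8")
req = urllib.request.Request("http://127.0.0.1:8188/prompt", data=data)
print(urllib.request.urlopen(req).read())
```

Note the `embedding:SDA768.pt` token in node "6": this is how the positive prompt references the textual-inversion embedding, which is what this example workflow demonstrates.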