{"KSampler": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model used for denoising the input latent."}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true, "tooltip": "The random seed used for creating the noise."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process."}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01, "tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"tooltip": "The scheduler controls how noise is gradually removed to form the image."}], "positive": ["CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to include in the image."}], "negative": ["CONDITIONING", {"tooltip": "The conditioning describing the 
attributes you want to exclude from the image."}], "latent_image": ["LATENT", {"tooltip": "The latent image to denoise."}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."}]}}, "input_order": {"required": ["model", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "latent_image", "denoise"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "KSampler", "display_name": "KSampler", "description": "Uses the provided model, positive and negative conditioning to denoise the latent image.", "python_module": "nodes", "category": "sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The denoised latent."], "search_aliases": ["sampler", "sample", "generate", "denoise", "diffuse", "txt2img", "img2img"]}, "CheckpointLoaderSimple": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", 
"flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", 
"sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", 
"sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], {"tooltip": "The name of the checkpoint (model) to load."}]}}, "input_order": {"required": ["ckpt_name"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE"], "output_is_list": [false, false, false], 
"output_name": ["MODEL", "CLIP", "VAE"], "name": "CheckpointLoaderSimple", "display_name": "Load Checkpoint", "description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The model used for denoising latents.", "The CLIP model used for encoding text prompts.", "The VAE model used for encoding and decoding images to and from latent space."], "search_aliases": ["load model", "checkpoint", "model loader", "load checkpoint", "ckpt", "model"]}, "CLIPTextEncode": {"input": {"required": {"text": ["STRING", {"multiline": true, "dynamicPrompts": true, "tooltip": "The text to be encoded."}], "clip": ["CLIP", {"tooltip": "The CLIP model used for encoding the text."}]}}, "input_order": {"required": ["text", "clip"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "CLIPTextEncode", "display_name": "CLIP Text Encode (Prompt)", "description": "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["A conditioning containing the embedded text used to guide the diffusion model."], "search_aliases": ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]}, "CLIPSetLastLayer": {"input": {"required": {"clip": ["CLIP"], "stop_at_clip_layer": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1, "advanced": true}]}}, "input_order": {"required": ["clip", "stop_at_clip_layer"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "CLIPSetLastLayer", "display_name": "CLIP Set Last Layer", "description": "", "python_module": 
"nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VAEDecode": {"input": {"required": {"samples": ["LATENT", {"tooltip": "The latent to be decoded."}], "vae": ["VAE", {"tooltip": "The VAE model used for decoding the latent."}]}}, "input_order": {"required": ["samples", "vae"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "VAEDecode", "display_name": "VAE Decode", "description": "Decodes latent images back into pixel space images.", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The decoded image."], "search_aliases": ["decode", "decode latent", "latent to image", "render latent"]}, "VAEEncode": {"input": {"required": {"pixels": ["IMAGE"], "vae": ["VAE"]}}, "input_order": {"required": ["pixels", "vae"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "VAEEncode", "display_name": "VAE Encode", "description": "", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "search_aliases": ["encode", "encode image", "image to latent"]}, "VAEEncodeForInpaint": {"input": {"required": {"pixels": ["IMAGE"], "vae": ["VAE"], "mask": ["MASK"], "grow_mask_by": ["INT", {"default": 6, "min": 0, "max": 64, "step": 1}]}}, "input_order": {"required": ["pixels", "vae", "mask", "grow_mask_by"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "VAEEncodeForInpaint", "display_name": "VAE Encode (for Inpainting)", "description": "", "python_module": "nodes", "category": "latent/inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VAELoader": {"input": {"required": {"vae_name": [["FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", 
"SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors", "taesd", "taesdxl", "taesd3", "taef1", "pixel_space"]]}}, "input_order": {"required": ["vae_name"]}, "is_input_list": false, "output": ["VAE"], "output_is_list": [false], 
"output_name": ["VAE"], "name": "VAELoader", "display_name": "Load VAE", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EmptyLatentImage": {"input": {"required": {"width": ["INT", {"default": 512, "min": 16, "max": 16384, "step": 8, "tooltip": "The width of the latent images in pixels."}], "height": ["INT", {"default": 512, "min": 16, "max": 16384, "step": 8, "tooltip": "The height of the latent images in pixels."}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}]}}, "input_order": {"required": ["width", "height", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "EmptyLatentImage", "display_name": "Empty Latent Image", "description": "Create a new batch of empty latent images to be denoised via sampling.", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The empty latent image batch."], "search_aliases": ["empty", "empty latent", "new latent", "create latent", "blank latent", "blank"]}, "LatentUpscale": {"input": {"required": {"samples": ["LATENT"], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["samples", "upscale_method", "width", "height", "crop"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentUpscale", "display_name": "Upscale Latent", "description": "", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "search_aliases": ["enlarge latent", "resize latent"]}, "LatentUpscaleBy": {"input": 
{"required": {"samples": ["LATENT"], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]], "scale_by": ["FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}]}}, "input_order": {"required": ["samples", "upscale_method", "scale_by"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentUpscaleBy", "display_name": "Upscale Latent By", "description": "", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "search_aliases": ["enlarge latent", "resize latent", "scale latent"]}, "LatentFromBatch": {"input": {"required": {"samples": ["LATENT"], "batch_index": ["INT", {"default": 0, "min": 0, "max": 63}], "length": ["INT", {"default": 1, "min": 1, "max": 64}]}}, "input_order": {"required": ["samples", "batch_index", "length"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentFromBatch", "display_name": "Latent From Batch", "description": "", "python_module": "nodes", "category": "latent/batch", "output_node": false, "has_intermediate_output": false, "search_aliases": ["select from batch", "pick latent", "batch subset"]}, "RepeatLatentBatch": {"input": {"required": {"samples": ["LATENT"], "amount": ["INT", {"default": 1, "min": 1, "max": 64}]}}, "input_order": {"required": ["samples", "amount"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "RepeatLatentBatch", "display_name": "Repeat Latent Batch", "description": "", "python_module": "nodes", "category": "latent/batch", "output_node": false, "has_intermediate_output": false, "search_aliases": ["duplicate latent", "clone latent"]}, "SaveImage": {"input": {"required": {"images": ["IMAGE", {"tooltip": "The images to save."}], "filename_prefix": ["STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "SaveImage", "display_name": "Save Image", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "nodes", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["save", "save image", "export image", "output image", "write image", "download"], "essentials_category": "Basics"}, "PreviewImage": {"input": {"required": {"images": ["IMAGE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "PreviewImage", "display_name": "Preview Image", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "nodes", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "LoadImage": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "LoadImage", "display_name": "Load Image", "description": "", 
"python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"], "essentials_category": "Basics"}, "LoadImageMask": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}], "channel": [["alpha", "red", "green", "blue"]]}}, "input_order": {"required": ["image", "channel"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "LoadImageMask", "display_name": "Load Image (as Mask)", "description": "", "python_module": "nodes", "category": "mask", "output_node": false, "has_intermediate_output": false, "search_aliases": ["import mask", "alpha mask", "channel mask"], "essentials_category": "Image Tools"}, "LoadImageOutput": {"input": {"required": {"image": ["COMBO", {"image_upload": true, "image_folder": "output", "remote": {"route": "/internal/files/output", "refresh_button": true, "control_after_refresh": "first"}}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "LoadImageOutput", "display_name": "Load Image (from Outputs)", "description": "Load an image from the output folder. 
When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration.", "python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["output image", "previous generation"], "essentials_category": "Basics"}, "ImageScale": {"input": {"required": {"image": ["IMAGE"], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["image", "upscale_method", "width", "height", "crop"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageScale", "display_name": "Upscale Image", "description": "", "python_module": "nodes", "category": "image/upscaling", "output_node": false, "has_intermediate_output": false, "search_aliases": ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"], "essentials_category": "Image Tools"}, "ImageScaleBy": {"input": {"required": {"image": ["IMAGE"], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}]}}, "input_order": {"required": ["image", "upscale_method", "scale_by"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageScaleBy", "display_name": "Upscale Image By", "description": "", "python_module": "nodes", "category": "image/upscaling", "output_node": false, "has_intermediate_output": false, "search_aliases": [], "essentials_category": "Image Tools"}, "ImageInvert": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], 
"output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageInvert", "display_name": "Invert Image", "description": "", "python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": ["reverse colors"], "essentials_category": "Image Tools"}, "ImageBatch": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"]}}, "input_order": {"required": ["image1", "image2"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageBatch", "display_name": "Batch Images", "description": "", "python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": ["combine images", "merge images", "stack images"]}, "ImagePadForOutpaint": {"input": {"required": {"image": ["IMAGE"], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "feathering": ["INT", {"default": 40, "min": 0, "max": 16384, "step": 1, "advanced": true}]}}, "input_order": {"required": ["image", "left", "top", "right", "bottom", "feathering"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImagePadForOutpaint", "display_name": "Pad Image for Outpainting", "description": "", "python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": ["extend canvas", "expand image"]}, "EmptyImage": {"input": {"required": {"width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "color": ["INT", {"default": 0, "min": 0, "max": 
16777215, "step": 1, "display": "color"}]}}, "input_order": {"required": ["width", "height", "batch_size", "color"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "EmptyImage", "display_name": "EmptyImage", "description": "", "python_module": "nodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningAverage": {"input": {"required": {"conditioning_to": ["CONDITIONING"], "conditioning_from": ["CONDITIONING"], "conditioning_to_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning_to", "conditioning_from", "conditioning_to_strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningAverage", "display_name": "ConditioningAverage", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": ["blend prompts", "interpolate conditioning", "mix prompts", "style fusion", "weighted blend"]}, "ConditioningCombine": {"input": {"required": {"conditioning_1": ["CONDITIONING"], "conditioning_2": ["CONDITIONING"]}}, "input_order": {"required": ["conditioning_1", "conditioning_2"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningCombine", "display_name": "Conditioning (Combine)", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": ["combine", "merge conditioning", "combine prompts", "merge prompts", "mix prompts", "add prompt"], "essentials_category": "Image Generation"}, "ConditioningConcat": {"input": {"required": {"conditioning_to": ["CONDITIONING"], "conditioning_from": ["CONDITIONING"]}}, "input_order": {"required": ["conditioning_to", 
"conditioning_from"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningConcat", "display_name": "Conditioning (Concat)", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetArea": {"input": {"required": {"conditioning": ["CONDITIONING"], "width": ["INT", {"default": 64, "min": 64, "max": 16384, "step": 8}], "height": ["INT", {"default": 64, "min": 64, "max": 16384, "step": 8}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "width", "height", "x", "y", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetArea", "display_name": "Conditioning (Set Area)", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": ["regional prompt", "area prompt", "spatial conditioning", "localized prompt"]}, "ConditioningSetAreaPercentage": {"input": {"required": {"conditioning": ["CONDITIONING"], "width": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}], "height": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}], "x": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "y": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "width", "height", "x", "y", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetAreaPercentage", 
"display_name": "Conditioning (Set Area with Percentage)", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetAreaStrength": {"input": {"required": {"conditioning": ["CONDITIONING"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetAreaStrength", "display_name": "ConditioningSetAreaStrength", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetMask": {"input": {"required": {"conditioning": ["CONDITIONING"], "mask": ["MASK"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}}, "input_order": {"required": ["conditioning", "mask", "strength", "set_cond_area"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetMask", "display_name": "Conditioning (Set Mask)", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": ["masked prompt", "regional inpaint conditioning", "mask conditioning"]}, "KSamplerAdvanced": {"input": {"required": {"model": ["MODEL"], "add_noise": [["enable", "disable"], {"advanced": true}], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", 
"exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent_image": ["LATENT"], "start_at_step": ["INT", {"default": 0, "min": 0, "max": 10000, "advanced": true}], "end_at_step": ["INT", {"default": 10000, "min": 0, "max": 10000, "advanced": true}], "return_with_leftover_noise": [["disable", "enable"], {"advanced": true}]}}, "input_order": {"required": ["model", "add_noise", "noise_seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "latent_image", "start_at_step", "end_at_step", "return_with_leftover_noise"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "KSamplerAdvanced", "display_name": "KSampler (Advanced)", "description": "", "python_module": "nodes", "category": "sampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SetLatentNoiseMask": {"input": {"required": {"samples": ["LATENT"], "mask": ["MASK"]}}, "input_order": {"required": ["samples", "mask"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "SetLatentNoiseMask", "display_name": "Set Latent Noise Mask", "description": "", "python_module": "nodes", "category": 
"latent/inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LatentComposite": {"input": {"required": {"samples_to": ["LATENT"], "samples_from": ["LATENT"], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "feather": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["samples_to", "samples_from", "x", "y", "feather"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentComposite", "display_name": "Latent Composite", "description": "", "python_module": "nodes", "category": "latent", "output_node": false, "has_intermediate_output": false, "search_aliases": ["overlay latent", "layer latent", "paste latent"]}, "LatentBlend": {"input": {"required": {"samples1": ["LATENT"], "samples2": ["LATENT"], "blend_factor": ["FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}]}}, "input_order": {"required": ["samples1", "samples2", "blend_factor"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentBlend", "display_name": "Latent Blend", "description": "", "python_module": "nodes", "category": "_for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": ["mix latents", "interpolate latents"]}, "LatentRotate": {"input": {"required": {"samples": ["LATENT"], "rotation": [["none", "90 degrees", "180 degrees", "270 degrees"]]}}, "input_order": {"required": ["samples", "rotation"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentRotate", "display_name": "Rotate Latent", "description": "", "python_module": "nodes", "category": "latent/transform", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LatentFlip": {"input": {"required": {"samples": ["LATENT"], "flip_method": 
[["x-axis: vertically", "y-axis: horizontally"]]}}, "input_order": {"required": ["samples", "flip_method"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentFlip", "display_name": "Flip Latent", "description": "", "python_module": "nodes", "category": "latent/transform", "output_node": false, "has_intermediate_output": false, "search_aliases": ["mirror latent"]}, "LatentCrop": {"input": {"required": {"samples": ["LATENT"], "width": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["samples", "width", "height", "x", "y"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentCrop", "display_name": "Crop Latent", "description": "", "python_module": "nodes", "category": "latent/transform", "output_node": false, "has_intermediate_output": false, "search_aliases": ["trim latent", "cut latent"]}, "LoraLoader": {"input": {"required": {"model": ["MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}], "clip": ["CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}], "lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"tooltip": "The name of the LoRA."}], "strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. 
This value can be negative."}], "strength_clip": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}]}}, "input_order": {"required": ["model", "clip", "lora_name", "strength_model", "strength_clip"]}, "is_input_list": false, "output": ["MODEL", "CLIP"], "output_is_list": [false, false], "output_name": ["MODEL", "CLIP"], "name": "LoraLoader", "display_name": "Load LoRA (Model and CLIP)", "description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "The modified CLIP model."], "search_aliases": ["lora", "load lora", "apply lora", "lora loader", "lora model"], "essentials_category": "Image Generation"}, "CLIPLoader": {"input": {"required": {"clip_name": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", 
"mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "type": [["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image"]]}, "optional": {"device": [["default", "cpu"], {"advanced": true}]}}, "input_order": {"required": ["clip_name", "type"], "optional": ["device"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "CLIPLoader", "display_name": "Load CLIP", "description": 
"[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B", "python_module": "nodes", "category": "advanced/loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "UNETLoader": {"input": {"required": {"unet_name": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", 
"Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-dev.sft", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux1-schnell.sft", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan3d-dit-v2-1/model.fp16.ckpt", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", 
"hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", 
"sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", 
"wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"]], "weight_dtype": [["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2"], {"advanced": true}]}}, "input_order": {"required": ["unet_name", "weight_dtype"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "UNETLoader", "display_name": "Load Diffusion Model", "description": "", "python_module": "nodes", "category": "advanced/loaders", "output_node": 
false, "has_intermediate_output": false, "search_aliases": []}, "DualCLIPLoader": {"input": {"required": {"clip_name1": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", 
"siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name2": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", 
"qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "type": [["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15", "kandinsky5", "kandinsky5_image", "ltxv", "newbie", "ace"]]}, "optional": {"device": [["default", "cpu"], {"advanced": true}]}}, "input_order": {"required": ["clip_name1", "clip_name2", "type"], "optional": ["device"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "DualCLIPLoader", "display_name": "DualCLIPLoader", "description": "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5\nhidream: at least one of t5 or llama, recommended t5 and llama\nhunyuan_image: qwen2.5vl 7b and byt5 small\nnewbie: gemma-3-4b-it, jina clip v2", "python_module": "nodes", "category": "advanced/loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CLIPVisionEncode": {"input": {"required": {"clip_vision": ["CLIP_VISION"], "image": ["IMAGE"], "crop": [["center", "none"]]}}, "input_order": {"required": ["clip_vision", "image", 
"crop"]}, "is_input_list": false, "output": ["CLIP_VISION_OUTPUT"], "output_is_list": [false], "output_name": ["CLIP_VISION_OUTPUT"], "name": "CLIPVisionEncode", "display_name": "CLIP Vision Encode", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StyleModelApply": {"input": {"required": {"conditioning": ["CONDITIONING"], "style_model": ["STYLE_MODEL"], "clip_vision_output": ["CLIP_VISION_OUTPUT"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "strength_type": [["multiply", "attn_bias"]]}}, "input_order": {"required": ["conditioning", "style_model", "clip_vision_output", "strength", "strength_type"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "StyleModelApply", "display_name": "Apply Style Model", "description": "", "python_module": "nodes", "category": "conditioning/style_model", "output_node": false, "has_intermediate_output": false, "search_aliases": ["style transfer"]}, "unCLIPConditioning": {"input": {"required": {"conditioning": ["CONDITIONING"], "clip_vision_output": ["CLIP_VISION_OUTPUT"], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "noise_augmentation": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "clip_vision_output", "strength", "noise_augmentation"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "unCLIPConditioning", "display_name": "unCLIPConditioning", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ControlNetApply": {"input": {"required": {"conditioning": ["CONDITIONING"], "control_net": ["CONTROL_NET"], "image": ["IMAGE"], "strength": ["FLOAT", 
{"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "control_net", "image", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ControlNetApply", "display_name": "Apply ControlNet (OLD)", "description": "", "python_module": "nodes", "category": "conditioning/controlnet", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "ControlNetApplyAdvanced": {"input": {"required": {"positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "control_net": ["CONTROL_NET"], "image": ["IMAGE"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"vae": ["VAE"]}}, "input_order": {"required": ["positive", "negative", "control_net", "image", "strength", "start_percent", "end_percent"], "optional": ["vae"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "ControlNetApplyAdvanced", "display_name": "Apply ControlNet", "description": "", "python_module": "nodes", "category": "conditioning/controlnet", "output_node": false, "has_intermediate_output": false, "search_aliases": ["controlnet", "apply controlnet", "use controlnet", "control net"]}, "ControlNetLoader": {"input": {"required": {"control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", 
"Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", 
"sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", 
"sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", 
"sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}}, "input_order": {"required": ["control_net_name"]}, "is_input_list": false, "output": ["CONTROL_NET"], "output_is_list": [false], "output_name": ["CONTROL_NET"], "name": "ControlNetLoader", "display_name": "Load ControlNet Model", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": ["controlnet", "control net", "cn", "load controlnet", "controlnet loader"]}, "DiffControlNetLoader": {"input": {"required": {"model": ["MODEL"], "control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", 
"SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", 
"sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", 
"sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", 
"sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}}, "input_order": {"required": ["model", "control_net_name"]}, "is_input_list": false, "output": ["CONTROL_NET"], "output_is_list": [false], "output_name": ["CONTROL_NET"], "name": "DiffControlNetLoader", "display_name": "Load ControlNet Model (diff)", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StyleModelLoader": {"input": {"required": {"style_model_name": [["flex1_redux_siglip2_512.safetensors", "flux/flux1-redux-dev.safetensors", "flux1-redux-dev.safetensors", "sd1/coadapter-style-sd15v1.pth"]]}}, "input_order": {"required": ["style_model_name"]}, "is_input_list": false, "output": ["STYLE_MODEL"], "output_is_list": [false], "output_name": ["STYLE_MODEL"], "name": "StyleModelLoader", "display_name": "Load Style Model", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CLIPVisionLoader": {"input": {"required": {"clip_name": 
[["CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors", "CLIP-ViT-H-14-laion2B-s32B-b79K/model.safetensors", "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_vision_g.safetensors", "clip_vision_h.safetensors", "clip_vision_l.safetensors", "llava_llama3_vision.safetensors", "sd1/model.safetensors", "sd1/pytorch_model.bin", "sdxl/model.safetensors", "sdxl/pytorch_model.bin", "sigclip_vision_patch14_384.safetensors"]]}}, "input_order": {"required": ["clip_name"]}, "is_input_list": false, "output": ["CLIP_VISION"], "output_is_list": [false], "output_name": ["CLIP_VISION"], "name": "CLIPVisionLoader", "display_name": "Load CLIP Vision", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VAEDecodeTiled": {"input": {"required": {"samples": ["LATENT"], "vae": ["VAE"], "tile_size": ["INT", {"default": 512, "min": 64, "max": 4096, "step": 32, "advanced": true}], "overlap": ["INT", {"default": 64, "min": 0, "max": 4096, "step": 32, "advanced": true}], "temporal_size": ["INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to decode at a time.", "advanced": true}], "temporal_overlap": ["INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap.", "advanced": true}]}}, "input_order": {"required": ["samples", "vae", "tile_size", "overlap", "temporal_size", "temporal_overlap"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "VAEDecodeTiled", "display_name": "VAE Decode (Tiled)", "description": "", "python_module": "nodes", "category": "_for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VAEEncodeTiled": {"input": {"required": {"pixels": ["IMAGE"], "vae": ["VAE"], "tile_size": ["INT", {"default": 512, "min": 64, "max": 
4096, "step": 64, "advanced": true}], "overlap": ["INT", {"default": 64, "min": 0, "max": 4096, "step": 32, "advanced": true}], "temporal_size": ["INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to encode at a time.", "advanced": true}], "temporal_overlap": ["INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap.", "advanced": true}]}}, "input_order": {"required": ["pixels", "vae", "tile_size", "overlap", "temporal_size", "temporal_overlap"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "VAEEncodeTiled", "display_name": "VAE Encode (Tiled)", "description": "", "python_module": "nodes", "category": "_for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "unCLIPCheckpointLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", 
"sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", 
"sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]]}}, "input_order": {"required": ["ckpt_name"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE", "CLIP_VISION"], "output_is_list": [false, false, false, false], "output_name": ["MODEL", "CLIP", "VAE", "CLIP_VISION"], "name": "unCLIPCheckpointLoader", "display_name": "unCLIPCheckpointLoader", "description": "", "python_module": "nodes", "category": "loaders", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GLIGENLoader": {"input": {"required": {"gligen_name": [[]]}}, "input_order": {"required": ["gligen_name"]}, "is_input_list": false, "output": ["GLIGEN"], "output_is_list": [false], "output_name": ["GLIGEN"], "name": "GLIGENLoader", "display_name": "GLIGENLoader", "description": "", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GLIGENTextBoxApply": {"input": {"required": {"conditioning_to": ["CONDITIONING"], "clip": ["CLIP"], "gligen_textbox_model": ["GLIGEN"], "text": ["STRING", {"multiline": true, "dynamicPrompts": true}], "width": ["INT", {"default": 64, "min": 8, "max": 16384, "step": 8}], "height": ["INT", {"default": 64, "min": 8, "max": 16384, "step": 8}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["conditioning_to", "clip", "gligen_textbox_model", "text", "width", "height", "x", "y"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "GLIGENTextBoxApply", "display_name": "GLIGENTextBoxApply", "description": "", "python_module": "nodes", "category": "conditioning/gligen", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InpaintModelConditioning": {"input": {"required": {"positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "vae": ["VAE"], "pixels": ["IMAGE"], "mask": ["MASK"], "noise_mask": ["BOOLEAN", {"default": true, "tooltip": "Add a noise mask to the latent so sampling will only happen within the mask. 
Might improve results or completely break things depending on the model."}]}}, "input_order": {"required": ["positive", "negative", "vae", "pixels", "mask", "noise_mask"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "name": "InpaintModelConditioning", "display_name": "InpaintModelConditioning", "description": "", "python_module": "nodes", "category": "conditioning/inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CheckpointLoader": {"input": {"required": {"config_name": [["anything_v3.yaml", "v1-inference.yaml", "v1-inference_clip_skip_2.yaml", "v1-inference_clip_skip_2_fp16.yaml", "v1-inference_fp16.yaml", "v1-inpainting-inference.yaml", "v2-inference-v.yaml", "v2-inference-v_fp32.yaml", "v2-inference.yaml", "v2-inference_fp32.yaml", "v2-inpainting-inference.yaml"]], "ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", 
"sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", 
"sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]]}}, "input_order": {"required": ["config_name", "ckpt_name"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE"], "output_is_list": [false, false, false], "output_name": ["MODEL", "CLIP", "VAE"], "name": "CheckpointLoader", "display_name": "Load Checkpoint With Config (DEPRECATED)", "description": "", "python_module": "nodes", "category": "advanced/loaders", 
"output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": ["load model", "model loader"]}, "DiffusersLoader": {"input": {"required": {"model_path": [["hunyuan3d-delight-v2-0", "hunyuan3d-paint-v2-0", "hunyuan3d-paint-v2-0-turbo", "stable-video-diffusion-img2vid-xt-1-1"]]}}, "input_order": {"required": ["model_path"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE"], "output_is_list": [false, false, false], "output_name": ["MODEL", "CLIP", "VAE"], "name": "DiffusersLoader", "display_name": "DiffusersLoader", "description": "", "python_module": "nodes", "category": "advanced/loaders/deprecated", "output_node": false, "has_intermediate_output": false, "search_aliases": ["load diffusers model"]}, "LoadLatent": {"input": {"required": {"latent": [[]]}}, "input_order": {"required": ["latent"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LoadLatent", "display_name": "LoadLatent", "description": "", "python_module": "nodes", "category": "_for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": ["import latent", "open latent"]}, "SaveLatent": {"input": {"required": {"samples": ["LATENT"], "filename_prefix": ["STRING", {"default": "latents/ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["samples", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "SaveLatent", "display_name": "SaveLatent", "description": "", "python_module": "nodes", "category": "_for_testing", "output_node": true, "has_intermediate_output": false, "search_aliases": ["export latent"]}, "ConditioningZeroOut": {"input": {"required": {"conditioning": ["CONDITIONING"]}}, "input_order": {"required": ["conditioning"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": 
["CONDITIONING"], "name": "ConditioningZeroOut", "display_name": "ConditioningZeroOut", "description": "", "python_module": "nodes", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": ["null conditioning", "clear conditioning"]}, "ConditioningSetTimestepRange": {"input": {"required": {"conditioning": ["CONDITIONING"], "start": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["conditioning", "start", "end"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetTimestepRange", "display_name": "ConditioningSetTimestepRange", "description": "", "python_module": "nodes", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoraLoaderModelOnly": {"input": {"required": {"model": ["MODEL"], "lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", 
"Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", 
"insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", 
"sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "lora_name", "strength_model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "LoraLoaderModelOnly", "display_name": "Load LoRA", "description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. 
Multiple LoRA nodes can be linked together.", "python_module": "nodes", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "The modified CLIP model."], "search_aliases": ["lora", "load lora", "apply lora", "lora loader", "lora model"], "essentials_category": "Image Generation"}, "LatentAdd": {"input": {"required": {"samples1": ["LATENT", {}], "samples2": ["LATENT", {}]}}, "input_order": {"required": ["samples1", "samples2"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentAdd", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine latents", "sum latents"], "essentials_category": null, "has_intermediate_output": false}, "LatentSubtract": {"input": {"required": {"samples1": ["LATENT", {}], "samples2": ["LATENT", {}]}}, "input_order": {"required": ["samples1", "samples2"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentSubtract", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["difference latent", "remove features"], "essentials_category": null, "has_intermediate_output": false}, "LatentMultiply": {"input": {"required": {"samples": ["LATENT", {}], "multiplier": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["samples", "multiplier"]}, "is_input_list": false, "output": 
["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentMultiply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["scale latent", "amplify latent", "latent gain"], "essentials_category": null, "has_intermediate_output": false}, "LatentInterpolate": {"input": {"required": {"samples1": ["LATENT", {}], "samples2": ["LATENT", {}], "ratio": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["samples1", "samples2", "ratio"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentInterpolate", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["blend latent", "mix latent", "lerp latent", "transition"], "essentials_category": null, "has_intermediate_output": false}, "LatentConcat": {"input": {"required": {"samples1": ["LATENT", {}], "samples2": ["LATENT", {}], "dim": ["COMBO", {"multiselect": false, "options": ["x", "-x", "y", "-y", "t", "-t"]}]}}, "input_order": {"required": ["samples1", "samples2", "dim"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentConcat", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": 
null, "search_aliases": ["join latents", "stitch latents"], "essentials_category": null, "has_intermediate_output": false}, "LatentCut": {"input": {"required": {"samples": ["LATENT", {}], "dim": ["COMBO", {"multiselect": false, "options": ["x", "y", "t"]}], "index": ["INT", {"default": 0, "min": -16384, "max": 16384, "step": 1}], "amount": ["INT", {"default": 1, "min": 1, "max": 16384, "step": 1}]}}, "input_order": {"required": ["samples", "dim", "index", "amount"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentCut", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["crop latent", "slice latent", "extract region"], "essentials_category": null, "has_intermediate_output": false}, "LatentCutToBatch": {"input": {"required": {"samples": ["LATENT", {}], "dim": ["COMBO", {"multiselect": false, "options": ["t", "x", "y"]}], "slice_size": ["INT", {"default": 1, "min": 1, "max": 16384, "step": 1}]}}, "input_order": {"required": ["samples", "dim", "slice_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentCutToBatch", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["slice to batch", "split latent", "tile latent"], "essentials_category": null, "has_intermediate_output": false}, "LatentBatch": {"input": {"required": {"samples1": ["LATENT", {}], "samples2": ["LATENT", {}]}}, "input_order": {"required": ["samples1", 
"samples2"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentBatch", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/batch", "output_node": false, "deprecated": true, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine latents", "merge latents", "join latents"], "essentials_category": null, "has_intermediate_output": false}, "LatentBatchSeedBehavior": {"input": {"required": {"samples": ["LATENT", {}], "seed_behavior": ["COMBO", {"default": "fixed", "multiselect": false, "options": ["random", "fixed"]}]}}, "input_order": {"required": ["samples", "seed_behavior"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentBatchSeedBehavior", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LatentApplyOperation": {"input": {"required": {"samples": ["LATENT", {}], "operation": ["LATENT_OPERATION", {}]}}, "input_order": {"required": ["samples", "operation"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentApplyOperation", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced/operations", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["transform latent"], 
"essentials_category": null, "has_intermediate_output": false}, "LatentApplyOperationCFG": {"input": {"required": {"model": ["MODEL", {}], "operation": ["LATENT_OPERATION", {}]}}, "input_order": {"required": ["model", "operation"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentApplyOperationCFG", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced/operations", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LatentOperationTonemapReinhard": {"input": {"required": {"multiplier": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["multiplier"]}, "is_input_list": false, "output": ["LATENT_OPERATION"], "output_is_list": [false], "output_name": ["LATENT_OPERATION"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentOperationTonemapReinhard", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced/operations", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["hdr latent"], "essentials_category": null, "has_intermediate_output": false}, "LatentOperationSharpen": {"input": {"required": {"sharpen_radius": ["INT", {"advanced": true, "default": 9, "min": 1, "max": 31, "step": 1}], "sigma": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1}], "alpha": ["FLOAT", {"advanced": true, "default": 0.1, "min": 0.0, "max": 5.0, "step": 0.01}]}}, "input_order": {"required": ["sharpen_radius", "sigma", "alpha"]}, "is_input_list": false, "output": ["LATENT_OPERATION"], "output_is_list": [false], 
"output_name": ["LATENT_OPERATION"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentOperationSharpen", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/advanced/operations", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReplaceVideoLatentFrames": {"input": {"required": {"destination": ["LATENT", {"tooltip": "The destination latent where frames will be replaced."}], "index": ["INT", {"tooltip": "The starting latent frame index in the destination latent where the source latent frames will be placed. Negative values count from the end.", "default": 0, "min": -16384, "max": 16384, "step": 1}]}, "optional": {"source": ["LATENT", {"tooltip": "The source latent providing frames to insert into the destination latent. If not provided, the destination latent is returned unchanged."}]}}, "input_order": {"required": ["destination", "index"], "optional": ["source"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReplaceVideoLatentFrames", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_latent", "category": "latent/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HypernetworkLoader": {"input": {"required": {"model": ["MODEL", {}], "hypernetwork_name": ["COMBO", {"multiselect": false, "options": []}], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "hypernetwork_name", "strength"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": 
[false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "HypernetworkLoader", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hypernetwork", "category": "loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "UpscaleModelLoader": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["1x-ITF-SkinDiffDetail-Lite-v1.pth", "1xDeJPG_OmniSR.pth", "4x-AnimeSharp.pth", "4x-ClearRealityV1.pth", "4x-ClearRealityV1_Soft.pth", "4x-UltraSharp.pth", "4xFaceUpDAT.pth", "4xLSDIR.pth", "4xNMKDSuperscale_4xNMKDSuperscale.pt", "4xNomos8kHAT-L_otf.pth", "4xNomosUniDAT_otf.pth", "4xRealWebPhoto_v4_dat2.pth", "4xRealWebPhoto_v4_dat2.safetensors", "4xUltrasharp_4xUltrasharpV10.pt", "4x_NMKD-Siax_200k.pth", "4x_NMKD-Superscale-SP_178000_G.pth", "4x_NickelbackFS_72000_G.pth", "4x_foolhardy_Remacri.pth", "8x_NMKD-Faces_160000_G.pth", "8x_NMKD-Superscale_150000_G.pth", "ESRGAN_4x.pth", "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth", "RealESRGAN_x2.pth", "RealESRGAN_x2plus.pth", "RealESRGAN_x4.pth", "RealESRGAN_x4plus.pth", "RealESRGAN_x4plus_anime_6B.pth", "RealESRNet_x4plus.pth", "ldsr/last.ckpt", "ltxv-spatial-upscaler-0.9.7.safetensors", "nmkdSiaxCX_200k.pt", "realesr-animevideov3.pth", "realesr-general-x4v3.pth", "realesrGeneralWDNX4_v3.pt", "x1_ITF_SkinDiffDetail_Lite_v1.pth"]}]}}, "input_order": {"required": ["model_name"]}, "is_input_list": false, "output": ["UPSCALE_MODEL"], "output_is_list": [false], "output_name": ["UPSCALE_MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "UpscaleModelLoader", "display_name": "Load Upscale Model", "description": "", "python_module": "comfy_extras.nodes_upscale_model", "category": "loaders", "output_node": false, "deprecated": false, "experimental": false, 
"dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageUpscaleWithModel": {"input": {"required": {"upscale_model": ["UPSCALE_MODEL", {}], "image": ["IMAGE", {}]}}, "input_order": {"required": ["upscale_model", "image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageUpscaleWithModel", "display_name": "Upscale Image (using Model)", "description": "", "python_module": "comfy_extras.nodes_upscale_model", "category": "image/upscaling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["upscale", "upscaler", "upsc", "enlarge image", "super resolution", "hires", "superres", "increase resolution"], "essentials_category": null, "has_intermediate_output": false}, "ImageBlend": {"input": {"required": {"image1": ["IMAGE", {}], "image2": ["IMAGE", {}], "blend_factor": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "blend_mode": ["COMBO", {"multiselect": false, "options": ["normal", "multiply", "screen", "overlay", "soft_light", "difference"]}]}}, "input_order": {"required": ["image1", "image2", "blend_factor", "blend_mode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageBlend", "display_name": "Image Blend", "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image/postprocessing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": "Image Tools", "has_intermediate_output": false}, "ImageBlur": {"input": {"required": {"image": ["IMAGE", {}], "blur_radius": ["INT", {"default": 1, 
"min": 1, "max": 31, "step": 1}], "sigma": ["FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1}]}}, "input_order": {"required": ["image", "blur_radius", "sigma"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageBlur", "display_name": "Image Blur", "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image/postprocessing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageQuantize": {"input": {"required": {"image": ["IMAGE", {}], "colors": ["INT", {"default": 256, "min": 1, "max": 256, "step": 1}], "dither": ["COMBO", {"multiselect": false, "options": ["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]}]}}, "input_order": {"required": ["image", "colors", "dither"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageQuantize", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image/postprocessing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageSharpen": {"input": {"required": {"image": ["IMAGE", {}], "sharpen_radius": ["INT", {"advanced": true, "default": 1, "min": 1, "max": 31, "step": 1}], "sigma": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.1, "max": 10.0, "step": 0.01}], "alpha": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}]}}, "input_order": {"required": ["image", "sharpen_radius", "sigma", "alpha"]}, "is_input_list": false, 
"output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageSharpen", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image/postprocessing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageScaleToTotalPixels": {"input": {"required": {"image": ["IMAGE", {}], "upscale_method": ["COMBO", {"multiselect": false, "options": ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]}], "megapixels": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}], "resolution_steps": ["INT", {"advanced": true, "default": 1, "min": 1, "max": 256}]}}, "input_order": {"required": ["image", "upscale_method", "megapixels", "resolution_steps"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageScaleToTotalPixels", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image/upscaling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ResizeImageMaskNode": {"input": {"required": {"input": ["COMFY_MATCHTYPE_V3", {"template": {"template_id": "input_type", "allowed_types": "IMAGE,MASK"}}], "resize_type": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Select how to resize: by exact dimensions, scale factor, matching another image, etc.", "options": [{"key": "scale dimensions", "inputs": {"required": {"width": ["INT", {"tooltip": "Target width in pixels. 
Set to 0 to auto-calculate from height while preserving aspect ratio.", "default": 512, "min": 0, "max": 16384, "step": 1}], "height": ["INT", {"tooltip": "Target height in pixels. Set to 0 to auto-calculate from width while preserving aspect ratio.", "default": 512, "min": 0, "max": 16384, "step": 1}], "crop": ["COMBO", {"tooltip": "How to handle aspect ratio mismatch: 'disabled' stretches to fit, 'center' crops to maintain aspect ratio.", "default": "center", "multiselect": false, "options": ["disabled", "center"]}]}}}, {"key": "scale by multiplier", "inputs": {"required": {"multiplier": ["FLOAT", {"tooltip": "Scale factor (e.g., 2.0 doubles size, 0.5 halves size).", "default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}]}}}, {"key": "scale longer dimension", "inputs": {"required": {"longer_size": ["INT", {"tooltip": "The longer edge will be resized to this value. Aspect ratio is preserved.", "default": 512, "min": 0, "max": 16384, "step": 1}]}}}, {"key": "scale shorter dimension", "inputs": {"required": {"shorter_size": ["INT", {"tooltip": "The shorter edge will be resized to this value. Aspect ratio is preserved.", "default": 512, "min": 0, "max": 16384, "step": 1}]}}}, {"key": "scale width", "inputs": {"required": {"width": ["INT", {"tooltip": "Target width in pixels. Height auto-adjusts to preserve aspect ratio.", "default": 512, "min": 0, "max": 16384, "step": 1}]}}}, {"key": "scale height", "inputs": {"required": {"height": ["INT", {"tooltip": "Target height in pixels. Width auto-adjusts to preserve aspect ratio.", "default": 512, "min": 0, "max": 16384, "step": 1}]}}}, {"key": "scale total pixels", "inputs": {"required": {"megapixels": ["FLOAT", {"tooltip": "Target total megapixels (e.g., 1.0 \u2248 1024\u00d71024). 
Aspect ratio is preserved.", "default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}]}}}, {"key": "match size", "inputs": {"required": {"match": ["IMAGE,MASK", {"tooltip": "Resize input to match the dimensions of this reference image or mask."}], "crop": ["COMBO", {"tooltip": "How to handle aspect ratio mismatch: 'disabled' stretches to fit, 'center' crops to maintain aspect ratio.", "default": "center", "multiselect": false, "options": ["disabled", "center"]}]}}}, {"key": "scale to multiple", "inputs": {"required": {"multiple": ["INT", {"tooltip": "Resize so width and height are divisible by this number. Useful for latent alignment (e.g., 8 or 64).", "default": 8, "min": 1, "max": 16384, "step": 1}]}}}]}], "scale_method": ["COMBO", {"tooltip": "Interpolation algorithm. 'area' is best for downscaling, 'lanczos' for upscaling, 'nearest-exact' for pixel art.", "default": "area", "multiselect": false, "options": ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]}]}}, "input_order": {"required": ["input", "resize_type", "scale_method"]}, "is_input_list": false, "output": ["COMFY_MATCHTYPE_V3"], "output_is_list": [false], "output_name": ["resized"], "output_tooltips": [null], "output_matchtypes": ["input_type"], "name": "ResizeImageMaskNode", "display_name": "Resize Image/Mask", "description": "Resize an image or mask using various scaling methods.", "python_module": "comfy_extras.nodes_post_processing", "category": "transform", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"], "essentials_category": null, "has_intermediate_output": false}, "BatchImagesNode": {"input": {"required": {"images": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "image", "min": 2, "max": 50}}]}}, 
"input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "BatchImagesNode", "display_name": "Batch Images", "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "image", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["batch", "image batch", "batch images", "combine images", "merge images", "stack images"], "essentials_category": "Image Tools", "has_intermediate_output": false}, "BatchMasksNode": {"input": {"required": {"masks": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"mask": ["MASK", {}]}}, "prefix": "mask", "min": 2, "max": 50}}]}}, "input_order": {"required": ["masks"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "BatchMasksNode", "display_name": "Batch Masks", "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine masks", "stack masks", "merge masks"], "essentials_category": null, "has_intermediate_output": false}, "BatchLatentsNode": {"input": {"required": {"latents": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"latent": ["LATENT", {}]}}, "prefix": "latent", "min": 2, "max": 50}}]}}, "input_order": {"required": ["latents"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "BatchLatentsNode", "display_name": "Batch Latents", "description": "", "python_module": "comfy_extras.nodes_post_processing", "category": "latent", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine latents", "stack latents", "merge latents"], "essentials_category": null, "has_intermediate_output": false}, "LatentCompositeMasked": {"input": {"required": {"destination": ["LATENT", {}], "source": ["LATENT", {}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "resize_source": ["BOOLEAN", {"default": false}]}, "optional": {"mask": ["MASK", {}]}}, "input_order": {"required": ["destination", "source", "x", "y", "resize_source"], "optional": ["mask"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentCompositeMasked", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "latent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["overlay latent", "layer latent", "paste latent", "inpaint latent"], "essentials_category": null, "has_intermediate_output": false}, "ImageCompositeMasked": {"input": {"required": {"destination": ["IMAGE", {}], "source": ["IMAGE", {}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "resize_source": ["BOOLEAN", {"default": false}]}, "optional": {"mask": ["MASK", {}]}}, "input_order": {"required": ["destination", "source", "x", "y", "resize_source"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageCompositeMasked", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "image", "output_node": false, "deprecated": false, 
"experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["paste image", "overlay", "layer"], "essentials_category": null, "has_intermediate_output": false}, "MaskToImage": {"input": {"required": {"mask": ["MASK", {}]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MaskToImage", "display_name": "Convert Mask to Image", "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["convert mask"], "essentials_category": null, "has_intermediate_output": false}, "ImageToMask": {"input": {"required": {"image": ["IMAGE", {}], "channel": ["COMBO", {"multiselect": false, "options": ["red", "green", "blue", "alpha"]}]}}, "input_order": {"required": ["image", "channel"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageToMask", "display_name": "Convert Image to Mask", "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["extract channel", "channel to mask"], "essentials_category": null, "has_intermediate_output": false}, "ImageColorToMask": {"input": {"required": {"image": ["IMAGE", {}], "color": ["INT", {"default": 0, "min": 0, "max": 16777215, "step": 1, "display": "number"}]}}, "input_order": {"required": ["image", "color"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageColorToMask", "display_name": null, 
"description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["color keying", "chroma key"], "essentials_category": null, "has_intermediate_output": false}, "SolidMask": {"input": {"required": {"value": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}]}}, "input_order": {"required": ["value", "width", "height"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "SolidMask", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "InvertMask": {"input": {"required": {"mask": ["MASK", {}]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "InvertMask", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["reverse mask", "flip mask"], "essentials_category": null, "has_intermediate_output": false}, "CropMask": {"input": {"required": {"mask": ["MASK", {}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 
1}], "height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}]}}, "input_order": {"required": ["mask", "x", "y", "width", "height"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "CropMask", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["cut mask", "extract mask region", "mask slice"], "essentials_category": null, "has_intermediate_output": false}, "MaskComposite": {"input": {"required": {"destination": ["MASK", {}], "source": ["MASK", {}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "operation": ["COMBO", {"multiselect": false, "options": ["multiply", "add", "subtract", "and", "or", "xor"]}]}}, "input_order": {"required": ["destination", "source", "x", "y", "operation"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "MaskComposite", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine masks", "blend masks", "layer masks"], "essentials_category": null, "has_intermediate_output": false}, "FeatherMask": {"input": {"required": {"mask": ["MASK", {}], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}]}}, "input_order": {"required": 
["mask", "left", "top", "right", "bottom"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "FeatherMask", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["soft edge mask", "blur mask edges", "gradient mask edge"], "essentials_category": null, "has_intermediate_output": false}, "GrowMask": {"input": {"required": {"mask": ["MASK", {}], "expand": ["INT", {"default": 0, "min": -16384, "max": 16384, "step": 1}], "tapered_corners": ["BOOLEAN", {"advanced": true, "default": true}]}}, "input_order": {"required": ["mask", "expand", "tapered_corners"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrowMask", "display_name": "Grow Mask", "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["expand mask", "shrink mask"], "essentials_category": null, "has_intermediate_output": false}, "ThresholdMask": {"input": {"required": {"mask": ["MASK", {}], "value": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["mask", "value"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "output_tooltips": [null], "output_matchtypes": null, "name": "ThresholdMask", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": 
["binary mask"], "essentials_category": null, "has_intermediate_output": false}, "MaskPreview": {"input": {"required": {"mask": ["MASK", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "MaskPreview", "display_name": "Preview Mask", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "comfy_extras.nodes_mask", "category": "mask", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["show mask", "view mask", "inspect mask", "debug mask"], "essentials_category": null, "has_intermediate_output": false}, "PorterDuffImageComposite": {"input": {"required": {"source": ["IMAGE", {}], "source_alpha": ["MASK", {}], "destination": ["IMAGE", {}], "destination_alpha": ["MASK", {}], "mode": ["COMBO", {"default": "DST", "multiselect": false, "options": ["ADD", "CLEAR", "DARKEN", "DST", "DST_ATOP", "DST_IN", "DST_OUT", "DST_OVER", "LIGHTEN", "MULTIPLY", "OVERLAY", "SCREEN", "SRC", "SRC_ATOP", "SRC_IN", "SRC_OUT", "SRC_OVER", "XOR"]}]}}, "input_order": {"required": ["source", "source_alpha", "destination", "destination_alpha", "mode"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "PorterDuffImageComposite", "display_name": "Porter-Duff Image Composite", "description": "", "python_module": "comfy_extras.nodes_compositing", "category": "mask/compositing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["alpha composite", "blend modes", "layer blend", "transparency blend"], 
"essentials_category": null, "has_intermediate_output": false}, "SplitImageWithAlpha": {"input": {"required": {"image": ["IMAGE", {}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SplitImageWithAlpha", "display_name": "Split Image with Alpha", "description": "", "python_module": "comfy_extras.nodes_compositing", "category": "mask/compositing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["extract alpha", "separate transparency", "remove alpha"], "essentials_category": null, "has_intermediate_output": false}, "JoinImageWithAlpha": {"input": {"required": {"image": ["IMAGE", {}], "alpha": ["MASK", {}]}}, "input_order": {"required": ["image", "alpha"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "JoinImageWithAlpha", "display_name": "Join Image with Alpha", "description": "", "python_module": "comfy_extras.nodes_compositing", "category": "mask/compositing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["add transparency", "apply alpha", "composite alpha", "RGBA"], "essentials_category": null, "has_intermediate_output": false}, "RebatchLatents": {"input": {"required": {"latents": ["LATENT", {}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["latents", "batch_size"]}, "is_input_list": true, "output": ["LATENT"], "output_is_list": [true], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "RebatchLatents", "display_name": "Rebatch Latents", "description": "", "python_module": 
"comfy_extras.nodes_rebatch", "category": "latent/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RebatchImages": {"input": {"required": {"images": ["IMAGE", {}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["images", "batch_size"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RebatchImages", "display_name": "Rebatch Images", "description": "", "python_module": "comfy_extras.nodes_rebatch", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ModelMergeSimple": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "ratio": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "ratio"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSimple", "display_name": "ModelMergeSimple", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeBlocks": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "input": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "out": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "input", "middle", "out"]}, "is_input_list": false, "output": ["MODEL"], 
"output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeBlocks", "display_name": "ModelMergeBlocks", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeSubtract": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "multiplier": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "multiplier"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSubtract", "display_name": "ModelMergeSubtract", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeAdd": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"]}}, "input_order": {"required": ["model1", "model2"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeAdd", "display_name": "ModelMergeAdd", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CheckpointSave": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "filename_prefix": ["STRING", {"default": "checkpoints/ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["model", "clip", "vae", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "CheckpointSave", "display_name": "Save Checkpoint", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": true, 
"has_intermediate_output": false, "search_aliases": ["save model", "export checkpoint", "merge save"]}, "CLIPMergeSimple": {"input": {"required": {"clip1": ["CLIP"], "clip2": ["CLIP"], "ratio": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["clip1", "clip2", "ratio"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "CLIPMergeSimple", "display_name": "CLIPMergeSimple", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CLIPMergeSubtract": {"input": {"required": {"clip1": ["CLIP"], "clip2": ["CLIP"], "multiplier": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["clip1", "clip2", "multiplier"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "CLIPMergeSubtract", "display_name": "CLIPMergeSubtract", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": ["clip difference", "text encoder subtract"]}, "CLIPMergeAdd": {"input": {"required": {"clip1": ["CLIP"], "clip2": ["CLIP"]}}, "input_order": {"required": ["clip1", "clip2"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "CLIPMergeAdd", "display_name": "CLIPMergeAdd", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": false, "has_intermediate_output": false, "search_aliases": ["combine clip"]}, "CLIPSave": {"input": {"required": {"clip": ["CLIP"], "filename_prefix": ["STRING", {"default": "clip/ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["clip", 
"filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "CLIPSave", "display_name": "CLIPSave", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "VAESave": {"input": {"required": {"vae": ["VAE"], "filename_prefix": ["STRING", {"default": "vae/ComfyUI_vae"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["vae", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "VAESave", "display_name": "VAESave", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ModelSave": {"input": {"required": {"model": ["MODEL"], "filename_prefix": ["STRING", {"default": "diffusion_models/ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["model", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ModelSave", "display_name": "ModelSave", "description": "", "python_module": "comfy_extras.nodes_model_merging", "category": "advanced/model_merging", "output_node": true, "has_intermediate_output": false, "search_aliases": ["export model", "checkpoint save"]}, "TomePatchModel": {"input": {"required": {"model": ["MODEL", {}], "ratio": ["FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "ratio"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "TomePatchModel", "display_name": 
null, "description": "", "python_module": "comfy_extras.nodes_tomesd", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeSDXLRefiner": {"input": {"required": {"ascore": ["FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "width": ["INT", {"default": 1024, "min": 0, "max": 16384}], "height": ["INT", {"default": 1024, "min": 0, "max": 16384}], "text": ["STRING", {"multiline": true, "dynamicPrompts": true}], "clip": ["CLIP", {}]}}, "input_order": {"required": ["ascore", "width", "height", "text", "clip"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeSDXLRefiner", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_clip_sdxl", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeSDXL": {"input": {"required": {"clip": ["CLIP", {}], "width": ["INT", {"default": 1024, "min": 0, "max": 16384}], "height": ["INT", {"default": 1024, "min": 0, "max": 16384}], "crop_w": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 16384}], "crop_h": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 16384}], "target_width": ["INT", {"default": 1024, "min": 0, "max": 16384}], "target_height": ["INT", {"default": 1024, "min": 0, "max": 16384}], "text_g": ["STRING", {"multiline": true, "dynamicPrompts": true}], "text_l": ["STRING", {"multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["clip", "width", "height", "crop_w", "crop_h", "target_width", 
"target_height", "text_g", "text_l"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeSDXL", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_clip_sdxl", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Canny": {"input": {"required": {"image": ["IMAGE", {}], "low_threshold": ["FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}], "high_threshold": ["FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01}]}}, "input_order": {"required": ["image", "low_threshold", "high_threshold"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "Canny", "display_name": "Canny", "description": "", "python_module": "comfy_extras.nodes_canny", "category": "image/preprocessors", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["edge detection", "outline", "contour detection", "line art"], "essentials_category": "Image Tools", "has_intermediate_output": false}, "FreeU": {"input": {"required": {"model": ["MODEL", {}], "b1": ["FLOAT", {"advanced": true, "default": 1.1, "min": 0.0, "max": 10.0, "step": 0.01}], "b2": ["FLOAT", {"advanced": true, "default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01}], "s1": ["FLOAT", {"advanced": true, "default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}], "s2": ["FLOAT", {"advanced": true, "default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "b1", "b2", "s1", "s2"]}, "is_input_list": false, "output": ["MODEL"], 
"output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "FreeU", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_freelunch", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FreeU_V2": {"input": {"required": {"model": ["MODEL", {}], "b1": ["FLOAT", {"advanced": true, "default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}], "b2": ["FLOAT", {"advanced": true, "default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}], "s1": ["FLOAT", {"advanced": true, "default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}], "s2": ["FLOAT", {"advanced": true, "default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "b1", "b2", "s1", "s2"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "FreeU_V2", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_freelunch", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerCustom": {"input": {"required": {"model": ["MODEL", {}], "add_noise": ["BOOLEAN", {"advanced": true, "default": true}], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "sampler": ["SAMPLER", {}], "sigmas": ["SIGMAS", {}], "latent_image": ["LATENT", {}]}}, "input_order": {"required": ["model", "add_noise", 
"noise_seed", "cfg", "positive", "negative", "sampler", "sigmas", "latent_image"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["output", "denoised_output"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SamplerCustom", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "BasicScheduler": {"input": {"required": {"model": ["MODEL", {}], "scheduler": ["COMBO", {"multiselect": false, "options": ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "scheduler", "steps", "denoise"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "BasicScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KarrasScheduler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "sigma_max": ["FLOAT", {"advanced": true, "default": 14.614642, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "sigma_min": ["FLOAT", {"advanced": true, "default": 0.0291675, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], 
"rho": ["FLOAT", {"advanced": true, "default": 7.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["steps", "sigma_max", "sigma_min", "rho"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "KarrasScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ExponentialScheduler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "sigma_max": ["FLOAT", {"advanced": true, "default": 14.614642, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "sigma_min": ["FLOAT", {"advanced": true, "default": 0.0291675, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["steps", "sigma_max", "sigma_min"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "ExponentialScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PolyexponentialScheduler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "sigma_max": ["FLOAT", {"advanced": true, "default": 14.614642, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "sigma_min": ["FLOAT", {"advanced": true, "default": 0.0291675, "min": 
0.0, "max": 5000.0, "step": 0.01, "round": false}], "rho": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["steps", "sigma_max", "sigma_min", "rho"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "PolyexponentialScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LaplaceScheduler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "sigma_max": ["FLOAT", {"advanced": true, "default": 14.614642, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "sigma_min": ["FLOAT", {"advanced": true, "default": 0.0291675, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "mu": ["FLOAT", {"advanced": true, "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1, "round": false}], "beta": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0.0, "max": 10.0, "step": 0.1, "round": false}]}}, "input_order": {"required": ["steps", "sigma_max", "sigma_min", "mu", "beta"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "LaplaceScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "VPScheduler": 
{"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "beta_d": ["FLOAT", {"advanced": true, "default": 19.9, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "beta_min": ["FLOAT", {"advanced": true, "default": 0.1, "min": 0.0, "max": 5000.0, "step": 0.01, "round": false}], "eps_s": ["FLOAT", {"advanced": true, "default": 0.001, "min": 0.0, "max": 1.0, "step": 0.0001, "round": false}]}}, "input_order": {"required": ["steps", "beta_d", "beta_min", "eps_s"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "VPScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "BetaSamplingScheduler": {"input": {"required": {"model": ["MODEL", {}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "alpha": ["FLOAT", {"advanced": true, "default": 0.6, "min": 0.0, "max": 50.0, "step": 0.01, "round": false}], "beta": ["FLOAT", {"advanced": true, "default": 0.6, "min": 0.0, "max": 50.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["model", "steps", "alpha", "beta"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "BetaSamplingScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": 
false}, "SDTurboScheduler": {"input": {"required": {"model": ["MODEL", {}], "steps": ["INT", {"default": 1, "min": 1, "max": 10}], "denoise": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "steps", "denoise"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "SDTurboScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KSamplerSelect": {"input": {"required": {"sampler_name": ["COMBO", {"multiselect": false, "options": ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]}]}}, "input_order": {"required": ["sampler_name"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "KSamplerSelect", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": 
"sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerEulerAncestral": {"input": {"required": {"eta": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["eta", "s_noise"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerEulerAncestral", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerEulerAncestralCFGPP": {"input": {"required": {"eta": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["eta", "s_noise"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerEulerAncestralCFGPP", "display_name": "SamplerEulerAncestralCFG++", "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerLMS": 
{"input": {"required": {"order": ["INT", {"advanced": true, "default": 4, "min": 1, "max": 100}]}}, "input_order": {"required": ["order"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerLMS", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerDPMPP_3M_SDE": {"input": {"required": {"eta": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "noise_device": ["COMBO", {"advanced": true, "multiselect": false, "options": ["gpu", "cpu"]}]}}, "input_order": {"required": ["eta", "s_noise", "noise_device"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerDPMPP_3M_SDE", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerDPMPP_2M_SDE": {"input": {"required": {"solver_type": ["COMBO", {"multiselect": false, "options": ["midpoint", "heun"]}], "eta": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 
false}], "noise_device": ["COMBO", {"advanced": true, "multiselect": false, "options": ["gpu", "cpu"]}]}}, "input_order": {"required": ["solver_type", "eta", "s_noise", "noise_device"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerDPMPP_2M_SDE", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerDPMPP_SDE": {"input": {"required": {"eta": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "r": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "noise_device": ["COMBO", {"advanced": true, "multiselect": false, "options": ["gpu", "cpu"]}]}}, "input_order": {"required": ["eta", "s_noise", "r", "noise_device"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerDPMPP_SDE", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerDPMPP_2S_Ancestral": {"input": {"required": {"eta": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"default": 1.0, 
"min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["eta", "s_noise"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerDPMPP_2S_Ancestral", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerDPMAdaptative": {"input": {"required": {"order": ["INT", {"advanced": true, "default": 3, "min": 2, "max": 3}], "rtol": ["FLOAT", {"advanced": true, "default": 0.05, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "atol": ["FLOAT", {"advanced": true, "default": 0.0078, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "h_init": ["FLOAT", {"advanced": true, "default": 0.05, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "pcoeff": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "icoeff": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "dcoeff": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "accept_safety": ["FLOAT", {"advanced": true, "default": 0.81, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "eta": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["order", "rtol", "atol", "h_init", "pcoeff", "icoeff", "dcoeff", "accept_safety", "eta", "s_noise"]}, "is_input_list": false, "output": ["SAMPLER"], 
"output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerDPMAdaptative", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerER_SDE": {"input": {"required": {"solver_type": ["COMBO", {"multiselect": false, "options": ["ER-SDE", "Reverse-time SDE", "ODE"]}], "max_stage": ["INT", {"advanced": true, "default": 3, "min": 1, "max": 3}], "eta": ["FLOAT", {"tooltip": "Stochastic strength of reverse-time SDE.\nWhen eta=0, it reduces to deterministic ODE. This setting doesn't apply to ER-SDE solver type.", "advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["solver_type", "max_stage", "eta", "s_noise"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerER_SDE", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerSASolver": {"input": {"required": {"model": ["MODEL", {}], "eta": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}], "sde_start_percent": ["FLOAT", {"advanced": true, "default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}], 
"sde_end_percent": ["FLOAT", {"advanced": true, "default": 0.8, "min": 0.0, "max": 1.0, "step": 0.001}], "s_noise": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "predictor_order": ["INT", {"advanced": true, "default": 3, "min": 1, "max": 6}], "corrector_order": ["INT", {"advanced": true, "default": 4, "min": 0, "max": 6}], "use_pece": ["BOOLEAN", {"advanced": true}], "simple_order_2": ["BOOLEAN", {"advanced": true}]}}, "input_order": {"required": ["model", "eta", "sde_start_percent", "sde_end_percent", "s_noise", "predictor_order", "corrector_order", "use_pece", "simple_order_2"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerSASolver", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["sde"], "essentials_category": null, "has_intermediate_output": false}, "SamplerSEEDS2": {"input": {"required": {"solver_type": ["COMBO", {"multiselect": false, "options": ["phi_1", "phi_2"]}], "eta": ["FLOAT", {"tooltip": "Stochastic strength", "advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"tooltip": "SDE noise multiplier", "advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "r": ["FLOAT", {"tooltip": "Relative step size for the intermediate stage (c2 node)", "advanced": true, "default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01, "round": false}]}}, "input_order": {"required": ["solver_type", "eta", "s_noise", "r"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], 
"output_matchtypes": null, "name": "SamplerSEEDS2", "display_name": null, "description": "This sampler node can represent multiple samplers:\n\nseeds_2\n- default setting\n\nexp_heun_2_x0\n- solver_type=phi_2, r=1.0, eta=0.0\n\nexp_heun_2_x0_sde\n- solver_type=phi_2, r=1.0, eta=1.0, s_noise=1.0", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["sde", "exp heun"], "essentials_category": null, "has_intermediate_output": false}, "SplitSigmas": {"input": {"required": {"sigmas": ["SIGMAS", {}], "step": ["INT", {"default": 0, "min": 0, "max": 10000}]}}, "input_order": {"required": ["sigmas", "step"]}, "is_input_list": false, "output": ["SIGMAS", "SIGMAS"], "output_is_list": [false, false], "output_name": ["high_sigmas", "low_sigmas"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SplitSigmas", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SplitSigmasDenoise": {"input": {"required": {"sigmas": ["SIGMAS", {}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["sigmas", "denoise"]}, "is_input_list": false, "output": ["SIGMAS", "SIGMAS"], "output_is_list": [false, false], "output_name": ["high_sigmas", "low_sigmas"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SplitSigmasDenoise", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, 
"experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FlipSigmas": {"input": {"required": {"sigmas": ["SIGMAS", {}]}}, "input_order": {"required": ["sigmas"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "FlipSigmas", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SetFirstSigma": {"input": {"required": {"sigmas": ["SIGMAS", {}], "sigma": ["FLOAT", {"default": 136.0, "min": 0.0, "max": 20000.0, "step": 0.001, "round": false}]}}, "input_order": {"required": ["sigmas", "sigma"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "SetFirstSigma", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ExtendIntermediateSigmas": {"input": {"required": {"sigmas": ["SIGMAS", {}], "steps": ["INT", {"default": 2, "min": 1, "max": 100}], "start_at_sigma": ["FLOAT", {"default": -1.0, "min": -1.0, "max": 20000.0, "step": 0.01, "round": false}], "end_at_sigma": ["FLOAT", {"default": 12.0, "min": 0.0, "max": 20000.0, "step": 0.01, "round": false}], "spacing": ["COMBO", {"multiselect": false, "options": ["linear", "cosine", "sine"]}]}}, 
"input_order": {"required": ["sigmas", "steps", "start_at_sigma", "end_at_sigma", "spacing"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "ExtendIntermediateSigmas", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["interpolate sigmas"], "essentials_category": null, "has_intermediate_output": false}, "SamplingPercentToSigma": {"input": {"required": {"model": ["MODEL", {}], "sampling_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001}], "return_actual_sigma": ["BOOLEAN", {"tooltip": "Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0.", "default": false}]}}, "input_order": {"required": ["model", "sampling_percent", "return_actual_sigma"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["sigma_value"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplingPercentToSigma", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/sigmas", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CFGGuider": {"input": {"required": {"model": ["MODEL", {}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}]}}, "input_order": {"required": ["model", "positive", "negative", "cfg"]}, "is_input_list": false, "output": ["GUIDER"], "output_is_list": 
[false], "output_name": ["GUIDER"], "output_tooltips": [null], "output_matchtypes": null, "name": "CFGGuider", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/guiders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "DualCFGGuider": {"input": {"required": {"model": ["MODEL", {}], "cond1": ["CONDITIONING", {}], "cond2": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "cfg_conds": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "cfg_cond2_negative": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "style": ["COMBO", {"multiselect": false, "options": ["regular", "nested"]}]}}, "input_order": {"required": ["model", "cond1", "cond2", "negative", "cfg_conds", "cfg_cond2_negative", "style"]}, "is_input_list": false, "output": ["GUIDER"], "output_is_list": [false], "output_name": ["GUIDER"], "output_tooltips": [null], "output_matchtypes": null, "name": "DualCFGGuider", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/guiders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["dual prompt guidance"], "essentials_category": null, "has_intermediate_output": false}, "BasicGuider": {"input": {"required": {"model": ["MODEL", {}], "conditioning": ["CONDITIONING", {}]}}, "input_order": {"required": ["model", "conditioning"]}, "is_input_list": false, "output": ["GUIDER"], "output_is_list": [false], "output_name": ["GUIDER"], "output_tooltips": [null], "output_matchtypes": null, "name": "BasicGuider", "display_name": null, "description": "", "python_module": 
"comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/guiders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RandomNoise": {"input": {"required": {"noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}}, "input_order": {"required": ["noise_seed"]}, "is_input_list": false, "output": ["NOISE"], "output_is_list": [false], "output_name": ["NOISE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RandomNoise", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/noise", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "DisableNoise": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["NOISE"], "output_is_list": [false], "output_name": ["NOISE"], "output_tooltips": [null], "output_matchtypes": null, "name": "DisableNoise", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling/noise", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["zero noise"], "essentials_category": null, "has_intermediate_output": false}, "AddNoise": {"input": {"required": {"model": ["MODEL", {}], "noise": ["NOISE", {}], "sigmas": ["SIGMAS", {}], "latent_image": ["LATENT", {}]}}, "input_order": {"required": ["model", "noise", "sigmas", "latent_image"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], 
"output_matchtypes": null, "name": "AddNoise", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "_for_testing/custom_sampling/noise", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerCustomAdvanced": {"input": {"required": {"noise": ["NOISE", {}], "guider": ["GUIDER", {}], "sampler": ["SAMPLER", {}], "sigmas": ["SIGMAS", {}], "latent_image": ["LATENT", {}]}}, "input_order": {"required": ["noise", "guider", "sampler", "sigmas", "latent_image"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["output", "denoised_output"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SamplerCustomAdvanced", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "sampling/custom_sampling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ManualSigmas": {"input": {"required": {"sigmas": ["STRING", {"default": "1, 0.5", "multiline": false}]}}, "input_order": {"required": ["sigmas"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "ManualSigmas", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_custom_sampler", "category": "_for_testing/custom_sampling", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["custom noise schedule", "define sigmas"], "essentials_category": null, "has_intermediate_output": false}, "HyperTile": {"input": 
{"required": {"model": ["MODEL", {}], "tile_size": ["INT", {"advanced": true, "default": 256, "min": 1, "max": 2048}], "swap_size": ["INT", {"advanced": true, "default": 2, "min": 1, "max": 128}], "max_depth": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 10}], "scale_depth": ["BOOLEAN", {"advanced": true, "default": false}]}}, "input_order": {"required": ["model", "tile_size", "swap_size", "max_depth", "scale_depth"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "HyperTile", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hypertile", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ModelSamplingDiscrete": {"input": {"required": {"model": ["MODEL"], "sampling": [["eps", "v_prediction", "lcm", "x0", "img_to_img", "img_to_img_flow"]], "zsnr": ["BOOLEAN", {"default": false, "advanced": true}]}}, "input_order": {"required": ["model", "sampling", "zsnr"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingDiscrete", "display_name": "ModelSamplingDiscrete", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingContinuousEDM": {"input": {"required": {"model": ["MODEL"], "sampling": [["v_prediction", "edm", "edm_playground_v2.5", "eps", "cosmos_rflow"]], "sigma_max": ["FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step": 0.001, "round": false, "advanced": true}], "sigma_min": ["FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step": 0.001, "round": false, "advanced": true}]}}, "input_order": 
{"required": ["model", "sampling", "sigma_max", "sigma_min"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingContinuousEDM", "display_name": "ModelSamplingContinuousEDM", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingContinuousV": {"input": {"required": {"model": ["MODEL"], "sampling": [["v_prediction"]], "sigma_max": ["FLOAT", {"default": 500.0, "min": 0.0, "max": 1000.0, "step": 0.001, "round": false, "advanced": true}], "sigma_min": ["FLOAT", {"default": 0.03, "min": 0.0, "max": 1000.0, "step": 0.001, "round": false, "advanced": true}]}}, "input_order": {"required": ["model", "sampling", "sigma_max", "sigma_min"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingContinuousV", "display_name": "ModelSamplingContinuousV", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingStableCascade": {"input": {"required": {"model": ["MODEL"], "shift": ["FLOAT", {"default": 2.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "shift"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingStableCascade", "display_name": "ModelSamplingStableCascade", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingSD3": {"input": {"required": {"model": ["MODEL"], "shift": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "shift"]}, "is_input_list": false, 
"output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingSD3", "display_name": "ModelSamplingSD3", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingAuraFlow": {"input": {"required": {"model": ["MODEL"], "shift": ["FLOAT", {"default": 1.73, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "shift"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingAuraFlow", "display_name": "ModelSamplingAuraFlow", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSamplingFlux": {"input": {"required": {"model": ["MODEL"], "max_shift": ["FLOAT", {"default": 1.15, "min": 0.0, "max": 100.0, "step": 0.01, "advanced": true}], "base_shift": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 100.0, "step": 0.01, "advanced": true}], "width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 8}]}}, "input_order": {"required": ["model", "max_shift", "base_shift", "width", "height"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelSamplingFlux", "display_name": "ModelSamplingFlux", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RescaleCFG": {"input": {"required": {"model": ["MODEL"], "multiplier": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "multiplier"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], 
"output_name": ["MODEL"], "name": "RescaleCFG", "display_name": "RescaleCFG", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/model", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelComputeDtype": {"input": {"required": {"model": ["MODEL"], "dtype": [["default", "fp32", "fp16", "bf16"], {"advanced": true}]}}, "input_order": {"required": ["model", "dtype"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelComputeDtype", "display_name": "ModelComputeDtype", "description": "", "python_module": "comfy_extras.nodes_model_advanced", "category": "advanced/debug/model", "output_node": false, "has_intermediate_output": false, "search_aliases": ["model precision", "change dtype"]}, "PatchModelAddDownscale": {"input": {"required": {"model": ["MODEL", {}], "block_number": ["INT", {"advanced": true, "default": 3, "min": 1, "max": 32, "step": 1}], "downscale_factor": ["FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}], "start_percent": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"advanced": true, "default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}], "downscale_after_skip": ["BOOLEAN", {"advanced": true, "default": true}], "downscale_method": ["COMBO", {"multiselect": false, "options": ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]}], "upscale_method": ["COMBO", {"multiselect": false, "options": ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]}]}}, "input_order": {"required": ["model", "block_number", "downscale_factor", "start_percent", "end_percent", "downscale_after_skip", "downscale_method", "upscale_method"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "PatchModelAddDownscale", "display_name": 
"PatchModelAddDownscale (Kohya Deep Shrink)", "description": "", "python_module": "comfy_extras.nodes_model_downscale", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageCrop": {"input": {"required": {"image": ["IMAGE", {}], "width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "x": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}]}}, "input_order": {"required": ["image", "width", "height", "x", "y"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageCrop", "display_name": "Image Crop (Deprecated)", "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, "deprecated": true, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["trim"], "essentials_category": "Image Tools", "has_intermediate_output": false}, "ImageCropV2": {"input": {"required": {"image": ["IMAGE", {}], "crop_region": ["BOUNDING_BOX", {"default": {"x": 0, "y": 0, "width": 512, "height": 512}, "socketless": true, "component": "ImageCrop"}]}}, "input_order": {"required": ["image", "crop_region"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageCropV2", "display_name": "Image Crop", "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, 
"search_aliases": ["trim"], "essentials_category": "Image Tools", "has_intermediate_output": true}, "PrimitiveBoundingBox": {"input": {"required": {"x": ["INT", {"default": 0, "min": 0, "max": 16384}], "y": ["INT", {"default": 0, "min": 0, "max": 16384}], "width": ["INT", {"default": 512, "min": 1, "max": 16384}], "height": ["INT", {"default": 512, "min": 1, "max": 16384}]}}, "input_order": {"required": ["x", "y", "width", "height"]}, "is_input_list": false, "output": ["BOUNDING_BOX"], "output_is_list": [false], "output_name": ["BOUNDING_BOX"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveBoundingBox", "display_name": "Bounding Box", "description": "", "python_module": "comfy_extras.nodes_images", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RepeatImageBatch": {"input": {"required": {"image": ["IMAGE", {}], "amount": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["image", "amount"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RepeatImageBatch", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["duplicate image", "clone image"], "essentials_category": null, "has_intermediate_output": false}, "ImageFromBatch": {"input": {"required": {"image": ["IMAGE", {}], "batch_index": ["INT", {"default": 0, "min": 0, "max": 4095}], "length": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["image", "batch_index", "length"]}, "is_input_list": false, "output": ["IMAGE"], 
"output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageFromBatch", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["select image", "pick from batch", "extract image"], "essentials_category": null, "has_intermediate_output": false}, "ImageAddNoise": {"input": {"required": {"image": ["IMAGE", {}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "strength": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["image", "seed", "strength"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageAddNoise", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["film grain"], "essentials_category": null, "has_intermediate_output": false}, "SaveAnimatedWEBP": {"input": {"required": {"images": ["IMAGE", {}], "filename_prefix": ["STRING", {"default": "ComfyUI", "multiline": false}], "fps": ["FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}], "lossless": ["BOOLEAN", {"default": true}], "quality": ["INT", {"default": 80, "min": 0, "max": 100}], "method": ["COMBO", {"multiselect": false, "options": ["default", "fastest", "slowest"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["images", "filename_prefix", "fps", "lossless", "quality", "method"], "hidden": ["prompt", 
"extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveAnimatedWEBP", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/animation", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveAnimatedPNG": {"input": {"required": {"images": ["IMAGE", {}], "filename_prefix": ["STRING", {"default": "ComfyUI", "multiline": false}], "fps": ["FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}], "compress_level": ["INT", {"advanced": true, "default": 4, "min": 0, "max": 9}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["images", "filename_prefix", "fps", "compress_level"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveAnimatedPNG", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/animation", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveSVGNode": {"input": {"required": {"svg": ["SVG", {}],
"filename_prefix": ["STRING", {"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes.", "default": "svg/ComfyUI", "multiline": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["svg", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveSVGNode", "display_name": null, "description": "Save SVG files on disk.", "python_module": "comfy_extras.nodes_images", "category": "image/save", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export vector", "save vector graphics"], "essentials_category": null, "has_intermediate_output": false}, "ImageStitch": {"input": {"required": {"image1": ["IMAGE", {}], "direction": ["COMBO", {"default": "right", "multiselect": false, "options": ["right", "down", "left", "up"]}], "match_image_size": ["BOOLEAN", {"default": true}], "spacing_width": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 1024, "step": 2}], "spacing_color": ["COMBO", {"advanced": true, "default": "white", "multiselect": false, "options": ["white", "black", "red", "green", "blue"]}]}, "optional": {"image2": ["IMAGE", {}]}}, "input_order": {"required": ["image1", "direction", "match_image_size", "spacing_width", "spacing_color"], "optional": ["image2"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageStitch", "display_name": "Image Stitch", "description": "Stitches image2 to image1 in the specified direction.\nIf image2 is not provided, returns image1 unchanged.\nOptional spacing can be added between images.", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["combine images", "join images", "concatenate images", "side by side"], "essentials_category": null, "has_intermediate_output": false}, "ResizeAndPadImage": {"input": {"required": {"image": ["IMAGE", {}], "target_width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "target_height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "padding_color": ["COMBO", {"advanced": true, "multiselect": false, "options": ["white", "black"]}], "interpolation": ["COMBO", {"advanced": true, "multiselect": false, "options": ["area", "bicubic", "nearest-exact", "bilinear", "lanczos"]}]}}, "input_order": {"required": ["image", "target_width", "target_height", "padding_color", "interpolation"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ResizeAndPadImage", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["fit to size"], "essentials_category": null, "has_intermediate_output": false}, "GetImageSize": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["INT", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["width", "height", "batch_size"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "GetImageSize", "display_name": "Get Image Size", "description": "Returns width and height of the image, and passes it through unchanged.", "python_module": "comfy_extras.nodes_images", "category": "image", "output_node": false, "deprecated": 
false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["dimensions", "resolution", "image info"], "essentials_category": null, "has_intermediate_output": false}, "ImageRotate": {"input": {"required": {"image": ["IMAGE", {}], "rotation": ["COMBO", {"multiselect": false, "options": ["none", "90 degrees", "180 degrees", "270 degrees"]}]}}, "input_order": {"required": ["image", "rotation"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageRotate", "display_name": "Image Rotate", "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["turn", "flip orientation"], "essentials_category": "Image Tools", "has_intermediate_output": false}, "ImageFlip": {"input": {"required": {"image": ["IMAGE", {}], "flip_method": ["COMBO", {"multiselect": false, "options": ["x-axis: vertically", "y-axis: horizontally"]}]}}, "input_order": {"required": ["image", "flip_method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageFlip", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/transform", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["mirror", "reflect"], "essentials_category": null, "has_intermediate_output": false}, "ImageScaleToMaxDimension": {"input": {"required": {"image": ["IMAGE", {}], "upscale_method": ["COMBO", {"multiselect": false, "options": ["area", "lanczos", "bilinear", "nearest-exact", "bicubic"]}], "largest_size": ["INT", {"default": 512, 
"min": 0, "max": 16384, "step": 1}]}}, "input_order": {"required": ["image", "upscale_method", "largest_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageScaleToMaxDimension", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/upscaling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SplitImageToTileList": {"input": {"required": {"image": ["IMAGE", {}], "tile_width": ["INT", {"default": 1024, "min": 64, "max": 16384}], "tile_height": ["INT", {"default": 1024, "min": 64, "max": 16384}], "overlap": ["INT", {"default": 128, "min": 0, "max": 4096}]}}, "input_order": {"required": ["image", "tile_width", "tile_height", "overlap"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "SplitImageToTileList", "display_name": "Split Image into List of Tiles", "description": "Splits an image into a batched list of tiles with a specified overlap.", "python_module": "comfy_extras.nodes_images", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["split image", "tile image", "slice image"], "essentials_category": null, "has_intermediate_output": false}, "ImageMergeTileList": {"input": {"required": {"image_list": ["IMAGE", {}], "final_width": ["INT", {"default": 1024, "min": 64, "max": 32768}], "final_height": ["INT", {"default": 1024, "min": 64, "max": 32768}], "overlap": ["INT", {"default": 128, "min": 0, "max": 4096}]}}, "input_order": {"required": ["image_list", "final_width", "final_height", "overlap"]}, 
"is_input_list": true, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageMergeTileList", "display_name": "Merge List of Tiles to Image", "description": "", "python_module": "comfy_extras.nodes_images", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["split image", "tile image", "slice image"], "essentials_category": null, "has_intermediate_output": false}, "ImageOnlyCheckpointLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", 
"hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", 
"sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", 
"sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]]}}, "input_order": {"required": ["ckpt_name"]}, "is_input_list": false, "output": ["MODEL", "CLIP_VISION", "VAE"], "output_is_list": [false, false, false], "output_name": ["MODEL", "CLIP_VISION", "VAE"], "name": "ImageOnlyCheckpointLoader", "display_name": "Image Only Checkpoint Loader (img2vid model)", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": "loaders/video_models", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SVD_img2vid_Conditioning": {"input": {"required": {"clip_vision": ["CLIP_VISION"], "init_image": ["IMAGE"], "vae": ["VAE"], "width": ["INT", {"default": 1024, "min": 
16, "max": 16384, "step": 8}], "height": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "video_frames": ["INT", {"default": 14, "min": 1, "max": 4096}], "motion_bucket_id": ["INT", {"default": 127, "min": 1, "max": 1023, "advanced": true}], "fps": ["INT", {"default": 6, "min": 1, "max": 1024}], "augmentation_level": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01, "advanced": true}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", "width", "height", "video_frames", "motion_bucket_id", "fps", "augmentation_level"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "name": "SVD_img2vid_Conditioning", "display_name": "SVD_img2vid_Conditioning", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": "conditioning/video_models", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VideoLinearCFGGuidance": {"input": {"required": {"model": ["MODEL"], "min_cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.5, "round": 0.01, "advanced": true}]}}, "input_order": {"required": ["model", "min_cfg"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "VideoLinearCFGGuidance", "display_name": "VideoLinearCFGGuidance", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": "sampling/video_models", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VideoTriangleCFGGuidance": {"input": {"required": {"model": ["MODEL"], "min_cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.5, "round": 0.01, "advanced": true}]}}, "input_order": {"required": ["model", "min_cfg"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "VideoTriangleCFGGuidance", "display_name": 
"VideoTriangleCFGGuidance", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": "sampling/video_models", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageOnlyCheckpointSave": {"input": {"required": {"model": ["MODEL"], "clip_vision": ["CLIP_VISION"], "vae": ["VAE"], "filename_prefix": ["STRING", {"default": "checkpoints/ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["model", "clip_vision", "vae", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImageOnlyCheckpointSave", "display_name": "ImageOnlyCheckpointSave", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": "advanced/model_merging", "output_node": true, "has_intermediate_output": false, "search_aliases": ["save model", "export checkpoint", "merge save"]}, "ConditioningSetAreaPercentageVideo": {"input": {"required": {"conditioning": ["CONDITIONING"], "width": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}], "height": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}], "temporal": ["FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}], "x": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "y": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "z": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["conditioning", "width", "height", "temporal", "x", "y", "z", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetAreaPercentageVideo", "display_name": "ConditioningSetAreaPercentageVideo", "description": "", "python_module": "comfy_extras.nodes_video_model", "category": 
"conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TrainLoraNode": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to train the LoRA on."}], "latents": ["LATENT", {"tooltip": "The Latents to use for training, serve as dataset/input of the model."}], "positive": ["CONDITIONING", {"tooltip": "The positive conditioning to use for training."}], "batch_size": ["INT", {"tooltip": "The batch size to use for training.", "default": 1, "min": 1, "max": 10000}], "grad_accumulation_steps": ["INT", {"tooltip": "The number of gradient accumulation steps to use for training.", "default": 1, "min": 1, "max": 1024}], "steps": ["INT", {"tooltip": "The number of steps to train the LoRA for.", "default": 16, "min": 1, "max": 100000}], "learning_rate": ["FLOAT", {"tooltip": "The learning rate to use for training.", "default": 0.0005, "min": 1e-07, "max": 1.0, "step": 1e-07}], "rank": ["INT", {"tooltip": "The rank of the LoRA layers.", "default": 8, "min": 1, "max": 128}], "optimizer": ["COMBO", {"tooltip": "The optimizer to use for training.", "default": "AdamW", "multiselect": false, "options": ["AdamW", "Adam", "SGD", "RMSprop"]}], "loss_function": ["COMBO", {"tooltip": "The loss function to use for training.", "default": "MSE", "multiselect": false, "options": ["MSE", "L1", "Huber", "SmoothL1"]}], "seed": ["INT", {"tooltip": "The seed to use for training (used in generator for LoRA weight initialization and noise sampling)", "default": 0, "min": 0, "max": 18446744073709551615}], "training_dtype": ["COMBO", {"tooltip": "The dtype to use for training. 'none' preserves the model's native compute dtype instead of overriding it. 
For fp16 models, GradScaler is automatically enabled.", "default": "bf16", "multiselect": false, "options": ["bf16", "fp32", "none"]}], "lora_dtype": ["COMBO", {"tooltip": "The dtype to use for lora.", "default": "bf16", "multiselect": false, "options": ["bf16", "fp32"]}], "quantized_backward": ["BOOLEAN", {"tooltip": "When training_dtype is 'none' and training on a quantized model, perform the backward pass with quantized matmul when enabled.", "default": false}], "algorithm": ["COMBO", {"tooltip": "The algorithm to use for training.", "default": "LoRA", "multiselect": false, "options": ["LoRA", "LoHa", "LoKr", "OFT"]}], "gradient_checkpointing": ["BOOLEAN", {"tooltip": "Use gradient checkpointing for training.", "default": true}], "checkpoint_depth": ["INT", {"tooltip": "Depth level for gradient checkpointing.", "default": 1, "min": 1, "max": 5}], "offloading": ["BOOLEAN", {"tooltip": "Offload model weights to CPU during training to save GPU memory.", "default": false}], "existing_lora": ["COMBO", {"tooltip": "The existing LoRA to append to. 
Set to None for new LoRA.", "default": "[None]", "multiselect": false, "options": ["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", 
"flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", 
"ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", 
"sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors", "[None]"]}], "bucket_mode": ["BOOLEAN", {"tooltip": "Enable resolution bucket mode. When enabled, expects pre-bucketed latents from ResolutionBucket node.", "default": false}], "bypass_mode": ["BOOLEAN", {"tooltip": "Enable bypass mode for training. When enabled, adapters are applied via forward hooks instead of weight modification. Useful for quantized models where weights cannot be directly modified.", "default": false}]}}, "input_order": {"required": ["model", "latents", "positive", "batch_size", "grad_accumulation_steps", "steps", "learning_rate", "rank", "optimizer", "loss_function", "seed", "training_dtype", "lora_dtype", "quantized_backward", "algorithm", "gradient_checkpointing", "checkpoint_depth", "offloading", "existing_lora", "bucket_mode", "bypass_mode"]}, "is_input_list": true, "output": ["LORA_MODEL", "LOSS_MAP", "INT"], "output_is_list": [false, false, false], "output_name": ["lora", "loss_map", "steps"], "output_tooltips": ["LoRA weights", "Loss history", "Total training steps"], "output_matchtypes": null, "name": "TrainLoraNode", "display_name": "Train LoRA", "description": "", "python_module": "comfy_extras.nodes_train", "category": "training", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LoraModelLoader": {"input": {"required": {"model": ["MODEL", {"tooltip": "The 
diffusion model the LoRA will be applied to."}], "lora": ["LORA_MODEL", {"tooltip": "The LoRA model to apply to the diffusion model."}], "strength_model": ["FLOAT", {"tooltip": "How strongly to modify the diffusion model. This value can be negative.", "default": 1.0, "min": -100.0, "max": 100.0}], "bypass": ["BOOLEAN", {"tooltip": "When enabled, applies LoRA in bypass mode without modifying base model weights. Useful for training and when model weights are offloaded.", "default": false}]}}, "input_order": {"required": ["model", "lora", "strength_model", "bypass"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "output_tooltips": ["The modified diffusion model."], "output_matchtypes": null, "name": "LoraModelLoader", "display_name": "Load LoRA Model", "description": "", "python_module": "comfy_extras.nodes_train", "category": "loaders", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveLoRA": {"input": {"required": {"lora": ["LORA_MODEL", {"tooltip": "The LoRA model to save. 
Do not use the model with LoRA layers."}], "prefix": ["STRING", {"tooltip": "The prefix to use for the saved LoRA file.", "default": "loras/ComfyUI_trained_lora", "multiline": false}]}, "optional": {"steps": ["INT", {"tooltip": "Optional: The number of steps the LoRA has been trained for, used to name the saved file."}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["lora", "prefix"], "optional": ["steps"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveLoRA", "display_name": "Save LoRA Weights", "description": "", "python_module": "comfy_extras.nodes_train", "category": "loaders", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export lora"], "essentials_category": null, "has_intermediate_output": false}, "LossGraphNode": {"input": {"required": {"loss": ["LOSS_MAP", {"tooltip": "Loss map from training node."}], "filename_prefix": ["STRING", {"tooltip": "Prefix for the saved loss graph image.", "default": "loss_graph", "multiline": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["loss", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "LossGraphNode", "display_name": "Plot Loss Graph", "description": "", "python_module": "comfy_extras.nodes_train", "category": "training", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["training chart", "training visualization", "plot loss"], "essentials_category": null, "has_intermediate_output": false}, "LoadImageDataSetFromFolder": {"input": 
{"required": {"folder": ["COMBO", {"tooltip": "The folder to load images from.", "multiselect": false, "options": [".ipynb_checkpoints", "3d"]}]}}, "input_order": {"required": ["folder"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["images"], "output_tooltips": ["List of loaded images"], "output_matchtypes": null, "name": "LoadImageDataSetFromFolder", "display_name": "Load Image Dataset from Folder", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LoadImageTextDataSetFromFolder": {"input": {"required": {"folder": ["COMBO", {"tooltip": "The folder to load images from.", "multiselect": false, "options": [".ipynb_checkpoints", "3d"]}]}}, "input_order": {"required": ["folder"]}, "is_input_list": false, "output": ["IMAGE", "STRING"], "output_is_list": [true, true], "output_name": ["images", "texts"], "output_tooltips": ["List of loaded images", "List of text captions"], "output_matchtypes": null, "name": "LoadImageTextDataSetFromFolder", "display_name": "Load Image and Text Dataset from Folder", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveImageDataSetToFolder": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to save."}], "folder_name": ["STRING", {"tooltip": "Name of the folder to save images to (inside output directory).", "default": "dataset", "multiline": false}], "filename_prefix": ["STRING", {"tooltip": "Prefix for saved image filenames.", "advanced": true, "default": "image", "multiline": 
false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["images", "folder_name", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": true, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveImageDataSetToFolder", "display_name": "Save Image Dataset to Folder", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveImageTextDataSetToFolder": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to save."}], "texts": ["STRING", {"tooltip": "List of text captions to save.", "multiline": false}], "folder_name": ["STRING", {"tooltip": "Name of the folder to save images to (inside output directory).", "default": "dataset", "multiline": false}], "filename_prefix": ["STRING", {"tooltip": "Prefix for saved image filenames.", "advanced": true, "default": "image", "multiline": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["images", "texts", "folder_name", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": true, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveImageTextDataSetToFolder", "display_name": "Save Image and Text Dataset to Folder", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ResizeImagesByShorterEdge": {"input": {"required": {"images": ["IMAGE", 
{"tooltip": "Image to process."}], "shorter_edge": ["INT", {"tooltip": "Target length for the shorter edge.", "default": 512, "min": 1, "max": 8192}]}}, "input_order": {"required": ["images", "shorter_edge"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "ResizeImagesByShorterEdge", "display_name": "Resize Images by Shorter Edge", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ResizeImagesByLongerEdge": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "longer_edge": ["INT", {"tooltip": "Target length for the longer edge.", "default": 1024, "min": 1, "max": 8192}]}}, "input_order": {"required": ["images", "longer_edge"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "ResizeImagesByLongerEdge", "display_name": "Resize Images by Longer Edge", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CenterCropImages": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "width": ["INT", {"tooltip": "Crop width.", "default": 512, "min": 1, "max": 8192}], "height": ["INT", {"tooltip": "Crop height.", "default": 512, "min": 1, "max": 8192}]}}, "input_order": {"required": ["images", "width", "height"]}, "is_input_list": false, "output": ["IMAGE"], 
"output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "CenterCropImages", "display_name": "Center Crop Images", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RandomCropImages": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "width": ["INT", {"tooltip": "Crop width.", "default": 512, "min": 1, "max": 8192}], "height": ["INT", {"tooltip": "Crop height.", "default": 512, "min": 1, "max": 8192}], "seed": ["INT", {"tooltip": "Random seed.", "default": 0, "min": 0, "max": 18446744073709551615}]}}, "input_order": {"required": ["images", "width", "height", "seed"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "RandomCropImages", "display_name": "Random Crop Images", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "NormalizeImages": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "mean": ["FLOAT", {"tooltip": "Mean value for normalization.", "advanced": true, "default": 0.5, "min": 0.0, "max": 1.0}], "std": ["FLOAT", {"tooltip": "Standard deviation for normalization.", "advanced": true, "default": 0.5, "min": 0.001, "max": 1.0}]}}, "input_order": {"required": ["images", "mean", "std"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], 
"output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "NormalizeImages", "display_name": "Normalize Images", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AdjustBrightness": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "factor": ["FLOAT", {"tooltip": "Brightness factor. 1.0 = no change, <1.0 = darker, >1.0 = brighter.", "default": 1.0, "min": 0.0, "max": 2.0}]}}, "input_order": {"required": ["images", "factor"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "AdjustBrightness", "display_name": "Adjust Brightness", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AdjustContrast": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Image to process."}], "factor": ["FLOAT", {"tooltip": "Contrast factor. 
1.0 = no change, <1.0 = less contrast, >1.0 = more contrast.", "default": 1.0, "min": 0.0, "max": 2.0}]}}, "input_order": {"required": ["images", "factor"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "AdjustContrast", "display_name": "Adjust Contrast", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ShuffleDataset": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to process."}], "seed": ["INT", {"tooltip": "Random seed.", "default": 0, "min": 0, "max": 18446744073709551615}]}}, "input_order": {"required": ["images", "seed"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "ShuffleDataset", "display_name": "Shuffle Image Dataset", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ShuffleImageTextDataset": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to shuffle."}], "texts": ["STRING", {"tooltip": "List of texts to shuffle.", "multiline": false}], "seed": ["INT", {"tooltip": "Random seed.", "default": 0, "min": 0, "max": 18446744073709551615}]}}, "input_order": {"required": ["images", "texts", "seed"]}, "is_input_list": true, "output": ["IMAGE", "STRING"], "output_is_list": [true, true], "output_name": ["images", "texts"], "output_tooltips": ["Shuffled 
images", "Shuffled texts"], "output_matchtypes": null, "name": "ShuffleImageTextDataset", "display_name": "Shuffle Image-Text Dataset", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextToLowercase": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}]}}, "input_order": {"required": ["texts"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "TextToLowercase", "display_name": "Text to Lowercase", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextToUppercase": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}]}}, "input_order": {"required": ["texts"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "TextToUppercase", "display_name": "Text to Uppercase", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TruncateText": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}], "max_length": ["INT", {"tooltip": "Maximum text 
length.", "default": 77, "min": 1, "max": 10000}]}}, "input_order": {"required": ["texts", "max_length"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "TruncateText", "display_name": "Truncate Text", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AddTextPrefix": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}], "prefix": ["STRING", {"tooltip": "Prefix to add.", "default": "", "multiline": false}]}}, "input_order": {"required": ["texts", "prefix"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "AddTextPrefix", "display_name": "Add Text Prefix", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AddTextSuffix": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}], "suffix": ["STRING", {"tooltip": "Suffix to add.", "default": "", "multiline": false}]}}, "input_order": {"required": ["texts", "suffix"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "AddTextSuffix", "display_name": "Add Text Suffix", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", 
"output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReplaceText": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}], "find": ["STRING", {"tooltip": "Text to find.", "default": "", "multiline": false}], "replace": ["STRING", {"tooltip": "Text to replace with.", "default": "", "multiline": false}]}}, "input_order": {"required": ["texts", "find", "replace"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "ReplaceText", "display_name": "Replace Text", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StripWhitespace": {"input": {"required": {"texts": ["STRING", {"tooltip": "Text to process.", "multiline": false}]}}, "input_order": {"required": ["texts"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "StripWhitespace", "display_name": "Strip Whitespace", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageDeduplication": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to process."}], "similarity_threshold": ["FLOAT", {"tooltip": "Similarity threshold (0-1). Higher means more similar. 
Images above this threshold are considered duplicates.", "advanced": true, "default": 0.95, "min": 0.0, "max": 1.0}]}}, "input_order": {"required": ["images", "similarity_threshold"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "ImageDeduplication", "display_name": "Image Deduplication", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ImageGrid": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to process."}], "columns": ["INT", {"tooltip": "Number of columns in the grid.", "default": 4, "min": 1, "max": 20}], "cell_width": ["INT", {"tooltip": "Width of each cell in the grid.", "advanced": true, "default": 256, "min": 32, "max": 2048}], "cell_height": ["INT", {"tooltip": "Height of each cell in the grid.", "advanced": true, "default": 256, "min": 32, "max": 2048}], "padding": ["INT", {"tooltip": "Padding between images.", "advanced": true, "default": 4, "min": 0, "max": 50}]}}, "input_order": {"required": ["images", "columns", "cell_width", "cell_height", "padding"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "ImageGrid", "display_name": "Image Grid", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MergeImageLists": {"input": {"required": {"images": ["IMAGE", {"tooltip": 
"List of images to process."}]}}, "input_order": {"required": ["images"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["images"], "output_tooltips": ["Processed images"], "output_matchtypes": null, "name": "MergeImageLists", "display_name": "Merge Image Lists", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/image", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MergeTextLists": {"input": {"required": {"texts": ["STRING", {"tooltip": "List of texts to process.", "multiline": false}]}}, "input_order": {"required": ["texts"]}, "is_input_list": true, "output": ["STRING"], "output_is_list": [null], "output_name": ["texts"], "output_tooltips": ["Processed texts"], "output_matchtypes": null, "name": "MergeTextLists", "display_name": "Merge Text Lists", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset/text", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MakeTrainingDataset": {"input": {"required": {"images": ["IMAGE", {"tooltip": "List of images to encode."}], "vae": ["VAE", {"tooltip": "VAE model for encoding images to latents."}], "clip": ["CLIP", {"tooltip": "CLIP model for encoding text to conditioning."}]}, "optional": {"texts": ["STRING", {"tooltip": "List of text captions. 
Can be length n (matching images), 1 (repeated for all), or omitted (uses empty string).", "multiline": false}]}}, "input_order": {"required": ["images", "vae", "clip"], "optional": ["texts"]}, "is_input_list": true, "output": ["LATENT", "CONDITIONING"], "output_is_list": [true, true], "output_name": ["latents", "conditioning"], "output_tooltips": ["List of latent dicts", "List of conditioning lists"], "output_matchtypes": null, "name": "MakeTrainingDataset", "display_name": "Make Training Dataset", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["encode dataset"], "essentials_category": null, "has_intermediate_output": false}, "SaveTrainingDataset": {"input": {"required": {"latents": ["LATENT", {"tooltip": "List of latent dicts from MakeTrainingDataset."}], "conditioning": ["CONDITIONING", {"tooltip": "List of conditioning lists from MakeTrainingDataset."}], "folder_name": ["STRING", {"tooltip": "Name of folder to save dataset (inside output directory).", "default": "training_dataset", "multiline": false}], "shard_size": ["INT", {"tooltip": "Number of samples per shard file.", "advanced": true, "default": 1000, "min": 1, "max": 100000}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["latents", "conditioning", "folder_name", "shard_size"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": true, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveTrainingDataset", "display_name": "Save Training Dataset", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export training data"], 
"essentials_category": null, "has_intermediate_output": false}, "LoadTrainingDataset": {"input": {"required": {"folder_name": ["STRING", {"tooltip": "Name of folder containing the saved dataset (inside output directory).", "default": "training_dataset", "multiline": false}]}}, "input_order": {"required": ["folder_name"]}, "is_input_list": false, "output": ["LATENT", "CONDITIONING"], "output_is_list": [true, true], "output_name": ["latents", "conditioning"], "output_tooltips": ["List of latent dicts", "List of conditioning lists"], "output_matchtypes": null, "name": "LoadTrainingDataset", "display_name": "Load Training Dataset", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["import dataset", "training data"], "essentials_category": null, "has_intermediate_output": false}, "ResolutionBucket": {"input": {"required": {"latents": ["LATENT", {"tooltip": "List of latent dicts to bucket by resolution."}], "conditioning": ["CONDITIONING", {"tooltip": "List of conditioning lists (must match latents length)."}]}}, "input_order": {"required": ["latents", "conditioning"]}, "is_input_list": true, "output": ["LATENT", "CONDITIONING"], "output_is_list": [true, true], "output_name": ["latents", "conditioning"], "output_tooltips": ["List of batched latent dicts, one per resolution bucket.", "List of conditioning lists, one per resolution bucket."], "output_matchtypes": null, "name": "ResolutionBucket", "display_name": "Resolution Bucket", "description": "", "python_module": "comfy_extras.nodes_dataset", "category": "dataset", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SelfAttentionGuidance": {"input": {"required": {"model": ["MODEL", 
{}], "scale": ["FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.01}], "blur_sigma": ["FLOAT", {"advanced": true, "default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}]}}, "input_order": {"required": ["model", "scale", "blur_sigma"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "SelfAttentionGuidance", "display_name": "Self-Attention Guidance", "description": "", "python_module": "comfy_extras.nodes_sag", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PerpNeg": {"input": {"required": {"model": ["MODEL", {}], "empty_conditioning": ["CONDITIONING", {}], "neg_scale": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "empty_conditioning", "neg_scale"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "PerpNeg", "display_name": "Perp-Neg (DEPRECATED by PerpNegGuider)", "description": "", "python_module": "comfy_extras.nodes_perpneg", "category": "_for_testing", "output_node": false, "deprecated": true, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PerpNegGuider": {"input": {"required": {"model": ["MODEL", {}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "empty_conditioning": ["CONDITIONING", {}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "neg_scale": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", 
"positive", "negative", "empty_conditioning", "cfg", "neg_scale"]}, "is_input_list": false, "output": ["GUIDER"], "output_is_list": [false], "output_name": ["GUIDER"], "output_tooltips": [null], "output_matchtypes": null, "name": "PerpNegGuider", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_perpneg", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StableZero123_Conditioning": {"input": {"required": {"clip_vision": ["CLIP_VISION", {}], "init_image": ["IMAGE", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "elevation": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}], "azimuth": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", "width", "height", "batch_size", "elevation", "azimuth"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "StableZero123_Conditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable3d", "category": "conditioning/3d_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StableZero123_Conditioning_Batched": {"input": {"required": {"clip_vision": ["CLIP_VISION", {}], "init_image": ["IMAGE", {}], "vae": 
["VAE", {}], "width": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "elevation": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}], "azimuth": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}], "elevation_batch_increment": ["FLOAT", {"advanced": true, "default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}], "azimuth_batch_increment": ["FLOAT", {"advanced": true, "default": 0.0, "min": -180.0, "max": 180.0, "step": 0.1, "round": false}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", "width", "height", "batch_size", "elevation", "azimuth", "elevation_batch_increment", "azimuth_batch_increment"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "StableZero123_Conditioning_Batched", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable3d", "category": "conditioning/3d_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SV3D_Conditioning": {"input": {"required": {"clip_vision": ["CLIP_VISION", {}], "init_image": ["IMAGE", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "video_frames": ["INT", {"default": 21, "min": 1, "max": 4096}], "elevation": ["FLOAT", {"default": 0.0, "min": -90.0, "max": 90.0, "step": 0.1, "round": false}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", 
"width", "height", "video_frames", "elevation"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "SV3D_Conditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable3d", "category": "conditioning/3d_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SD_4XUpscale_Conditioning": {"input": {"required": {"images": ["IMAGE", {}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "scale_ratio": ["FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}], "noise_augmentation": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["images", "positive", "negative", "scale_ratio", "noise_augmentation"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "SD_4XUpscale_Conditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_sdupscale", "category": "conditioning/upscale_diffusion", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PhotoMakerLoader": {"input": {"required": {"photomaker_model_name": ["COMBO", {"multiselect": false, "options": ["photomaker-v1.bin", "photomaker-v2.bin"]}]}}, "input_order": {"required": ["photomaker_model_name"]}, "is_input_list": false, "output": ["PHOTOMAKER"], 
"output_is_list": [false], "output_name": ["PHOTOMAKER"], "output_tooltips": [null], "output_matchtypes": null, "name": "PhotoMakerLoader", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_photomaker", "category": "_for_testing/photomaker", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PhotoMakerEncode": {"input": {"required": {"photomaker": ["PHOTOMAKER", {}], "image": ["IMAGE", {}], "clip": ["CLIP", {}], "text": ["STRING", {"default": "photograph of photomaker", "multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["photomaker", "image", "clip", "text"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "PhotoMakerEncode", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_photomaker", "category": "_for_testing/photomaker", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodePixArtAlpha": {"input": {"required": {"width": ["INT", {"default": 1024, "min": 0, "max": 16384}], "height": ["INT", {"default": 1024, "min": 0, "max": 16384}], "text": ["STRING", {"multiline": true, "dynamicPrompts": true}], "clip": ["CLIP", {}]}}, "input_order": {"required": ["width", "height", "text", "clip"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodePixArtAlpha", "display_name": null, "description": "Encodes text and sets the resolution conditioning for PixArt Alpha. 
Does not apply to PixArt Sigma.", "python_module": "comfy_extras.nodes_pixart", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["pixart prompt"], "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeControlnet": {"input": {"required": {"clip": ["CLIP", {}], "conditioning": ["CONDITIONING", {}], "text": ["STRING", {"multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["clip", "conditioning", "text"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeControlnet", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cond", "category": "_for_testing/conditioning", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "T5TokenizerOptions": {"input": {"required": {"clip": ["CLIP", {}], "min_padding": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 10000, "step": 1}], "min_length": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 10000, "step": 1}]}}, "input_order": {"required": ["clip", "min_padding", "min_length"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "output_tooltips": [null], "output_matchtypes": null, "name": "T5TokenizerOptions", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cond", "category": "_for_testing/conditioning", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Morphology": {"input": 
{"required": {"image": ["IMAGE", {}], "operation": ["COMBO", {"multiselect": false, "options": ["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]}], "kernel_size": ["INT", {"default": 3, "min": 3, "max": 999, "step": 1}]}}, "input_order": {"required": ["image", "operation", "kernel_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "Morphology", "display_name": "ImageMorphology", "description": "", "python_module": "comfy_extras.nodes_morphology", "category": "image/postprocessing", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["erode", "dilate"], "essentials_category": null, "has_intermediate_output": false}, "ImageRGBToYUV": {"input": {"required": {"image": ["IMAGE", {}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["Y", "U", "V"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "ImageRGBToYUV", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_morphology", "category": "image/batch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["color space conversion"], "essentials_category": null, "has_intermediate_output": false}, "ImageYUVToRGB": {"input": {"required": {"Y": ["IMAGE", {}], "U": ["IMAGE", {}], "V": ["IMAGE", {}]}}, "input_order": {"required": ["Y", "U", "V"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ImageYUVToRGB", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_morphology", "category": "image/batch", 
"output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["color space conversion"], "essentials_category": null, "has_intermediate_output": false}, "StableCascade_EmptyLatentImage": {"input": {"required": {"width": ["INT", {"default": 1024, "min": 256, "max": 16384, "step": 8}], "height": ["INT", {"default": 1024, "min": 256, "max": 16384, "step": 8}], "compression": ["INT", {"advanced": true, "default": 42, "min": 4, "max": 128, "step": 1}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "compression", "batch_size"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["stage_c", "stage_b"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "StableCascade_EmptyLatentImage", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable_cascade", "category": "latent/stable_cascade", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StableCascade_StageB_Conditioning": {"input": {"required": {"conditioning": ["CONDITIONING", {}], "stage_c": ["LATENT", {}]}}, "input_order": {"required": ["conditioning", "stage_c"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "StableCascade_StageB_Conditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable_cascade", "category": "conditioning/stable_cascade", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": 
false}, "StableCascade_StageC_VAEEncode": {"input": {"required": {"image": ["IMAGE", {}], "vae": ["VAE", {}], "compression": ["INT", {"advanced": true, "default": 42, "min": 4, "max": 128, "step": 1}]}}, "input_order": {"required": ["image", "vae", "compression"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["stage_c", "stage_b"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "StableCascade_StageC_VAEEncode", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable_cascade", "category": "latent/stable_cascade", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StableCascade_SuperResolutionControlnet": {"input": {"required": {"image": ["IMAGE", {}], "vae": ["VAE", {}]}}, "input_order": {"required": ["image", "vae"]}, "is_input_list": false, "output": ["IMAGE", "LATENT", "LATENT"], "output_is_list": [false, false, false], "output_name": ["controlnet_input", "stage_c", "stage_b"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "StableCascade_SuperResolutionControlnet", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_stable_cascade", "category": "_for_testing/stable_cascade", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "DifferentialDiffusion": {"input": {"required": {"model": ["MODEL", {}]}, "optional": {"strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model"], "optional": ["strength"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], 
"output_matchtypes": null, "name": "DifferentialDiffusion", "display_name": "Differential Diffusion", "description": "", "python_module": "comfy_extras.nodes_differential_diffusion", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["inpaint gradient", "variable denoise strength"], "essentials_category": null, "has_intermediate_output": false}, "InstructPixToPixConditioning": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "pixels": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "pixels"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "InstructPixToPixConditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_ip2p", "category": "conditioning/instructpix2pix", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ModelMergeSD1": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "time_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "label_emb.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 
0.01}], "input_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "out.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, 
"input_order": {"required": ["model1", "model2", "time_embed.", "label_emb.", "input_blocks.0.", "input_blocks.1.", "input_blocks.2.", "input_blocks.3.", "input_blocks.4.", "input_blocks.5.", "input_blocks.6.", "input_blocks.7.", "input_blocks.8.", "input_blocks.9.", "input_blocks.10.", "input_blocks.11.", "middle_block.0.", "middle_block.1.", "middle_block.2.", "output_blocks.0.", "output_blocks.1.", "output_blocks.2.", "output_blocks.3.", "output_blocks.4.", "output_blocks.5.", "output_blocks.6.", "output_blocks.7.", "output_blocks.8.", "output_blocks.9.", "output_blocks.10.", "output_blocks.11.", "out."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSD1", "display_name": "ModelMergeSD1", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeSD2": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "time_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "label_emb.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "input_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "out.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "time_embed.", "label_emb.", "input_blocks.0.", "input_blocks.1.", "input_blocks.2.", "input_blocks.3.", "input_blocks.4.", "input_blocks.5.", "input_blocks.6.", "input_blocks.7.", "input_blocks.8.", "input_blocks.9.", "input_blocks.10.", "input_blocks.11.", "middle_block.0.", 
"middle_block.1.", "middle_block.2.", "output_blocks.0.", "output_blocks.1.", "output_blocks.2.", "output_blocks.3.", "output_blocks.4.", "output_blocks.5.", "output_blocks.6.", "output_blocks.7.", "output_blocks.8.", "output_blocks.9.", "output_blocks.10.", "output_blocks.11.", "out."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSD2", "display_name": "ModelMergeSD2", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeSDXL": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "time_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "label_emb.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.0": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.1": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.2": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.3": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.4": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.5": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.6": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.7": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "input_blocks.8": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.0": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.1": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "middle_block.2": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.0": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 
1.0, "step": 0.01}], "output_blocks.1": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.2": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.3": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.4": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.5": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.6": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.7": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "output_blocks.8": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "out.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "time_embed.", "label_emb.", "input_blocks.0", "input_blocks.1", "input_blocks.2", "input_blocks.3", "input_blocks.4", "input_blocks.5", "input_blocks.6", "input_blocks.7", "input_blocks.8", "middle_block.0", "middle_block.1", "middle_block.2", "output_blocks.0", "output_blocks.1", "output_blocks.2", "output_blocks.3", "output_blocks.4", "output_blocks.5", "output_blocks.6", "output_blocks.7", "output_blocks.8", "out."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSDXL", "display_name": "ModelMergeSDXL", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeSD3_2B": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "context_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "y_embedder.": ["FLOAT", 
{"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.21.": ["FLOAT", {"default": 
1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embed.", "x_embedder.", "context_embedder.", "y_embedder.", "t_embedder.", "joint_blocks.0.", "joint_blocks.1.", "joint_blocks.2.", "joint_blocks.3.", "joint_blocks.4.", "joint_blocks.5.", "joint_blocks.6.", "joint_blocks.7.", "joint_blocks.8.", "joint_blocks.9.", "joint_blocks.10.", "joint_blocks.11.", "joint_blocks.12.", "joint_blocks.13.", "joint_blocks.14.", "joint_blocks.15.", "joint_blocks.16.", "joint_blocks.17.", "joint_blocks.18.", "joint_blocks.19.", "joint_blocks.20.", "joint_blocks.21.", "joint_blocks.22.", "joint_blocks.23.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSD3_2B", "display_name": "ModelMergeSD3_2B", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeAuraflow": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "init_x_linear.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "positional_encoding": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "cond_seq_linear.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "register_tokens": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_layers.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_layers.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"double_layers.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_layers.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "single_layers.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_layers.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "modF.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_linear.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "init_x_linear.", "positional_encoding", "cond_seq_linear.", "register_tokens", "t_embedder.", "double_layers.0.", "double_layers.1.", "double_layers.2.", "double_layers.3.", "single_layers.0.", "single_layers.1.", "single_layers.2.", "single_layers.3.", "single_layers.4.", "single_layers.5.", "single_layers.6.", "single_layers.7.", "single_layers.8.", "single_layers.9.", "single_layers.10.", "single_layers.11.", "single_layers.12.", "single_layers.13.", "single_layers.14.", "single_layers.15.", "single_layers.16.", "single_layers.17.", "single_layers.18.", "single_layers.19.", "single_layers.20.", "single_layers.21.", "single_layers.22.", "single_layers.23.", "single_layers.24.", "single_layers.25.", "single_layers.26.", "single_layers.27.", "single_layers.28.", "single_layers.29.", 
"single_layers.30.", "single_layers.31.", "modF.", "final_linear."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeAuraflow", "display_name": "ModelMergeAuraflow", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeFlux1": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "img_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "time_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "guidance_in": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "vector_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "txt_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"double_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "double_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "single_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.36.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "single_blocks.37.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, 
"min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "img_in.", "time_in.", "guidance_in", "vector_in.", "txt_in.", "double_blocks.0.", "double_blocks.1.", "double_blocks.2.", "double_blocks.3.", "double_blocks.4.", "double_blocks.5.", "double_blocks.6.", "double_blocks.7.", "double_blocks.8.", "double_blocks.9.", "double_blocks.10.", "double_blocks.11.", "double_blocks.12.", "double_blocks.13.", "double_blocks.14.", "double_blocks.15.", "double_blocks.16.", "double_blocks.17.", "double_blocks.18.", "single_blocks.0.", "single_blocks.1.", "single_blocks.2.", "single_blocks.3.", "single_blocks.4.", "single_blocks.5.", "single_blocks.6.", "single_blocks.7.", "single_blocks.8.", "single_blocks.9.", "single_blocks.10.", "single_blocks.11.", "single_blocks.12.", "single_blocks.13.", "single_blocks.14.", "single_blocks.15.", "single_blocks.16.", "single_blocks.17.", "single_blocks.18.", "single_blocks.19.", "single_blocks.20.", "single_blocks.21.", "single_blocks.22.", "single_blocks.23.", "single_blocks.24.", "single_blocks.25.", "single_blocks.26.", "single_blocks.27.", "single_blocks.28.", "single_blocks.29.", "single_blocks.30.", "single_blocks.31.", "single_blocks.32.", "single_blocks.33.", "single_blocks.34.", "single_blocks.35.", "single_blocks.36.", "single_blocks.37.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeFlux1", "display_name": "ModelMergeFlux1", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeSD35_Large": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"context_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "y_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"joint_blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.36.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "joint_blocks.37.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embed.", "x_embedder.", "context_embedder.", "y_embedder.", "t_embedder.", "joint_blocks.0.", "joint_blocks.1.", "joint_blocks.2.", "joint_blocks.3.", "joint_blocks.4.", "joint_blocks.5.", "joint_blocks.6.", "joint_blocks.7.", "joint_blocks.8.", "joint_blocks.9.", "joint_blocks.10.", 
"joint_blocks.11.", "joint_blocks.12.", "joint_blocks.13.", "joint_blocks.14.", "joint_blocks.15.", "joint_blocks.16.", "joint_blocks.17.", "joint_blocks.18.", "joint_blocks.19.", "joint_blocks.20.", "joint_blocks.21.", "joint_blocks.22.", "joint_blocks.23.", "joint_blocks.24.", "joint_blocks.25.", "joint_blocks.26.", "joint_blocks.27.", "joint_blocks.28.", "joint_blocks.29.", "joint_blocks.30.", "joint_blocks.31.", "joint_blocks.32.", "joint_blocks.33.", "joint_blocks.34.", "joint_blocks.35.", "joint_blocks.36.", "joint_blocks.37.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeSD35_Large", "display_name": "ModelMergeSD35_Large", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeMochiPreview": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_frequencies.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t5_y_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t5_yproj.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.32.": ["FLOAT", {"default": 1.0, 
"min": 0.0, "max": 1.0, "step": 0.01}], "blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.36.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.37.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.38.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.39.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.40.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.41.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.42.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.43.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.44.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.45.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.46.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.47.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_frequencies.", "t_embedder.", "t5_y_embedder.", "t5_yproj.", "blocks.0.", "blocks.1.", "blocks.2.", "blocks.3.", "blocks.4.", "blocks.5.", "blocks.6.", "blocks.7.", "blocks.8.", "blocks.9.", "blocks.10.", "blocks.11.", "blocks.12.", "blocks.13.", "blocks.14.", "blocks.15.", "blocks.16.", "blocks.17.", "blocks.18.", "blocks.19.", "blocks.20.", "blocks.21.", "blocks.22.", "blocks.23.", "blocks.24.", "blocks.25.", "blocks.26.", "blocks.27.", "blocks.28.", "blocks.29.", "blocks.30.", "blocks.31.", "blocks.32.", "blocks.33.", "blocks.34.", "blocks.35.", "blocks.36.", "blocks.37.", "blocks.38.", 
"blocks.39.", "blocks.40.", "blocks.41.", "blocks.42.", "blocks.43.", "blocks.44.", "blocks.45.", "blocks.46.", "blocks.47.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeMochiPreview", "display_name": "ModelMergeMochiPreview", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeLTXV": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "patchify_proj.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "adaln_single.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "caption_projection.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"transformer_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "scale_shift_table": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "proj_out.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "patchify_proj.", "adaln_single.", "caption_projection.", "transformer_blocks.0.", "transformer_blocks.1.", "transformer_blocks.2.", "transformer_blocks.3.", "transformer_blocks.4.", "transformer_blocks.5.", "transformer_blocks.6.", "transformer_blocks.7.", "transformer_blocks.8.", 
"transformer_blocks.9.", "transformer_blocks.10.", "transformer_blocks.11.", "transformer_blocks.12.", "transformer_blocks.13.", "transformer_blocks.14.", "transformer_blocks.15.", "transformer_blocks.16.", "transformer_blocks.17.", "transformer_blocks.18.", "transformer_blocks.19.", "transformer_blocks.20.", "transformer_blocks.21.", "transformer_blocks.22.", "transformer_blocks.23.", "transformer_blocks.24.", "transformer_blocks.25.", "transformer_blocks.26.", "transformer_blocks.27.", "scale_shift_table", "proj_out."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeLTXV", "display_name": "ModelMergeLTXV", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeCosmos7B": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "extra_pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "affline_norm.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "blocks.block7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embedder.", "extra_pos_embedder.", 
"x_embedder.", "t_embedder.", "affline_norm.", "blocks.block0.", "blocks.block1.", "blocks.block2.", "blocks.block3.", "blocks.block4.", "blocks.block5.", "blocks.block6.", "blocks.block7.", "blocks.block8.", "blocks.block9.", "blocks.block10.", "blocks.block11.", "blocks.block12.", "blocks.block13.", "blocks.block14.", "blocks.block15.", "blocks.block16.", "blocks.block17.", "blocks.block18.", "blocks.block19.", "blocks.block20.", "blocks.block21.", "blocks.block22.", "blocks.block23.", "blocks.block24.", "blocks.block25.", "blocks.block26.", "blocks.block27.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeCosmos7B", "display_name": "ModelMergeCosmos7B", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeCosmos14B": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "extra_pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "affline_norm.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], 
"blocks.block6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block29.": 
["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.block35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embedder.", "extra_pos_embedder.", "x_embedder.", "t_embedder.", "affline_norm.", "blocks.block0.", "blocks.block1.", "blocks.block2.", "blocks.block3.", "blocks.block4.", "blocks.block5.", "blocks.block6.", "blocks.block7.", "blocks.block8.", "blocks.block9.", "blocks.block10.", "blocks.block11.", "blocks.block12.", "blocks.block13.", "blocks.block14.", "blocks.block15.", "blocks.block16.", "blocks.block17.", "blocks.block18.", "blocks.block19.", "blocks.block20.", "blocks.block21.", "blocks.block22.", "blocks.block23.", "blocks.block24.", "blocks.block25.", "blocks.block26.", "blocks.block27.", "blocks.block28.", "blocks.block29.", "blocks.block30.", "blocks.block31.", "blocks.block32.", "blocks.block33.", "blocks.block34.", "blocks.block35.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeCosmos14B", "display_name": "ModelMergeCosmos14B", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeWAN2_1": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "patch_embedding.": ["FLOAT", 
{"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "time_embedding.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "time_projection.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "text_embedding.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "img_emb.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 
0.01}], "blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.36.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.37.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.38.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.39.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "head.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "patch_embedding.", "time_embedding.", "time_projection.", "text_embedding.", "img_emb.", "blocks.0.", "blocks.1.", "blocks.2.", "blocks.3.", "blocks.4.", "blocks.5.", "blocks.6.", "blocks.7.", "blocks.8.", "blocks.9.", "blocks.10.", 
"blocks.11.", "blocks.12.", "blocks.13.", "blocks.14.", "blocks.15.", "blocks.16.", "blocks.17.", "blocks.18.", "blocks.19.", "blocks.20.", "blocks.21.", "blocks.22.", "blocks.23.", "blocks.24.", "blocks.25.", "blocks.26.", "blocks.27.", "blocks.28.", "blocks.29.", "blocks.30.", "blocks.31.", "blocks.32.", "blocks.33.", "blocks.34.", "blocks.35.", "blocks.36.", "blocks.37.", "blocks.38.", "blocks.39.", "head."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeWAN2_1", "display_name": "ModelMergeWAN2_1", "description": "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb.", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeCosmosPredict2_2B": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedding_norm.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.8.": ["FLOAT", {"default": 
1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embedder.", "x_embedder.", "t_embedder.", "t_embedding_norm.", "blocks.0.", "blocks.1.", "blocks.2.", "blocks.3.", "blocks.4.", "blocks.5.", "blocks.6.", "blocks.7.", "blocks.8.", "blocks.9.", "blocks.10.", "blocks.11.", "blocks.12.", "blocks.13.", "blocks.14.", "blocks.15.", 
"blocks.16.", "blocks.17.", "blocks.18.", "blocks.19.", "blocks.20.", "blocks.21.", "blocks.22.", "blocks.23.", "blocks.24.", "blocks.25.", "blocks.26.", "blocks.27.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeCosmosPredict2_2B", "display_name": "ModelMergeCosmosPredict2_2B", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeCosmosPredict2_14B": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "x_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedder.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "t_embedding_norm.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.12.": 
["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.26.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "final_layer.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embedder.", "x_embedder.", "t_embedder.", "t_embedding_norm.", "blocks.0.", "blocks.1.", "blocks.2.", "blocks.3.", "blocks.4.", "blocks.5.", "blocks.6.", "blocks.7.", "blocks.8.", "blocks.9.", "blocks.10.", "blocks.11.", "blocks.12.", "blocks.13.", "blocks.14.", "blocks.15.", "blocks.16.", "blocks.17.", "blocks.18.", "blocks.19.", "blocks.20.", "blocks.21.", "blocks.22.", "blocks.23.", "blocks.24.", "blocks.25.", "blocks.26.", "blocks.27.", "blocks.28.", "blocks.29.", "blocks.30.", "blocks.31.", "blocks.32.", "blocks.33.", "blocks.34.", "blocks.35.", "final_layer."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeCosmosPredict2_14B", "display_name": "ModelMergeCosmosPredict2_14B", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelMergeQwenImage": {"input": {"required": {"model1": ["MODEL"], "model2": ["MODEL"], "pos_embeds.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "img_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "txt_norm.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "txt_in.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "time_text_embed.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.0.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.1.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.2.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.3.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.4.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 
0.01}], "transformer_blocks.5.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.6.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.7.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.8.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.9.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.10.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.11.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.12.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.13.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.14.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.15.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.16.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.17.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.18.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.19.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.20.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.21.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.22.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.23.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.24.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.25.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.26.": ["FLOAT", 
{"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.27.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.28.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.29.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.30.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.31.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.32.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.33.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.34.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.35.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.36.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.37.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.38.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.39.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.40.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.41.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.42.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.43.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.44.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.45.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.46.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.47.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, 
"step": 0.01}], "transformer_blocks.48.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.49.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.50.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.51.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.52.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.53.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.54.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.55.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.56.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.57.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.58.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "transformer_blocks.59.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "proj_out.": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model1", "model2", "pos_embeds.", "img_in.", "txt_norm.", "txt_in.", "time_text_embed.", "transformer_blocks.0.", "transformer_blocks.1.", "transformer_blocks.2.", "transformer_blocks.3.", "transformer_blocks.4.", "transformer_blocks.5.", "transformer_blocks.6.", "transformer_blocks.7.", "transformer_blocks.8.", "transformer_blocks.9.", "transformer_blocks.10.", "transformer_blocks.11.", "transformer_blocks.12.", "transformer_blocks.13.", "transformer_blocks.14.", "transformer_blocks.15.", "transformer_blocks.16.", "transformer_blocks.17.", "transformer_blocks.18.", "transformer_blocks.19.", "transformer_blocks.20.", "transformer_blocks.21.", "transformer_blocks.22.", "transformer_blocks.23.", "transformer_blocks.24.", "transformer_blocks.25.", 
"transformer_blocks.26.", "transformer_blocks.27.", "transformer_blocks.28.", "transformer_blocks.29.", "transformer_blocks.30.", "transformer_blocks.31.", "transformer_blocks.32.", "transformer_blocks.33.", "transformer_blocks.34.", "transformer_blocks.35.", "transformer_blocks.36.", "transformer_blocks.37.", "transformer_blocks.38.", "transformer_blocks.39.", "transformer_blocks.40.", "transformer_blocks.41.", "transformer_blocks.42.", "transformer_blocks.43.", "transformer_blocks.44.", "transformer_blocks.45.", "transformer_blocks.46.", "transformer_blocks.47.", "transformer_blocks.48.", "transformer_blocks.49.", "transformer_blocks.50.", "transformer_blocks.51.", "transformer_blocks.52.", "transformer_blocks.53.", "transformer_blocks.54.", "transformer_blocks.55.", "transformer_blocks.56.", "transformer_blocks.57.", "transformer_blocks.58.", "transformer_blocks.59.", "proj_out."]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelMergeQwenImage", "display_name": "ModelMergeQwenImage", "description": "", "python_module": "comfy_extras.nodes_model_merging_model_specific", "category": "advanced/model_merging/model_specific", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PerturbedAttentionGuidance": {"input": {"required": {"model": ["MODEL", {}], "scale": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 0.01}]}}, "input_order": {"required": ["model", "scale"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "PerturbedAttentionGuidance", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_pag", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "AlignYourStepsScheduler": {"input": {"required": {"model_type": ["COMBO", {"multiselect": false, "options": ["SD1", "SDXL", "SVD"]}], "steps": ["INT", {"default": 10, "min": 1, "max": 10000}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model_type", "steps", "denoise"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "AlignYourStepsScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_align_your_steps", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["AYS scheduler"], "essentials_category": null, "has_intermediate_output": false}, "UNetSelfAttentionMultiply": {"input": {"required": {"model": ["MODEL", {}], "q": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "k": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "v": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "out": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "q", "k", "v", "out"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "UNetSelfAttentionMultiply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_attention_multiply", "category": "_for_testing/attention_experiments", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"UNetCrossAttentionMultiply": {"input": {"required": {"model": ["MODEL", {}], "q": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "k": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "v": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "out": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "q", "k", "v", "out"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "UNetCrossAttentionMultiply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_attention_multiply", "category": "_for_testing/attention_experiments", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPAttentionMultiply": {"input": {"required": {"clip": ["CLIP", {}], "q": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "k": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "v": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "out": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["clip", "q", "k", "v", "out"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPAttentionMultiply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_attention_multiply", "category": "_for_testing/attention_experiments", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, 
"price_badge": null, "search_aliases": ["clip attention scale", "text encoder attention"], "essentials_category": null, "has_intermediate_output": false}, "UNetTemporalAttentionMultiply": {"input": {"required": {"model": ["MODEL", {}], "self_structural": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "self_temporal": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "cross_structural": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "cross_temporal": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "self_structural", "self_temporal", "cross_structural", "cross_temporal"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "UNetTemporalAttentionMultiply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_attention_multiply", "category": "_for_testing/attention_experiments", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerLCMUpscale": {"input": {"required": {"scale_ratio": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.1, "max": 20.0, "step": 0.01}], "scale_steps": ["INT", {"advanced": true, "default": -1, "min": -1, "max": 1000, "step": 1}], "upscale_method": ["COMBO", {"multiselect": false, "options": ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]}]}}, "input_order": {"required": ["scale_ratio", "scale_steps", "upscale_method"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerLCMUpscale", "display_name": null, "description": "", 
"python_module": "comfy_extras.nodes_advanced_samplers", "category": "sampling/custom_sampling/samplers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SamplerEulerCFGpp": {"input": {"required": {"version": ["COMBO", {"advanced": true, "multiselect": false, "options": ["regular", "alternative"]}]}}, "input_order": {"required": ["version"]}, "is_input_list": false, "output": ["SAMPLER"], "output_is_list": [false], "output_name": ["SAMPLER"], "output_tooltips": [null], "output_matchtypes": null, "name": "SamplerEulerCFGpp", "display_name": "SamplerEulerCFG++", "description": "", "python_module": "comfy_extras.nodes_advanced_samplers", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WebcamCapture": {"input": {"required": {"image": ["WEBCAM", {}], "width": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "height": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "capture_on_queue": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image", "width", "height", "capture_on_queue"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "WebcamCapture", "display_name": "Webcam Capture", "description": "", "python_module": "comfy_extras.nodes_webcam", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": ["camera input", "live capture", "camera feed", "snapshot"], "essentials_category": "Basics"}, "EmptyLatentAudio": {"input": {"required": {"seconds": ["FLOAT", {"default": 47.6, "min": 1.0, "max": 1000.0, "step": 0.1}], "batch_size": ["INT", {"tooltip": "The number of latent images in the 
batch.", "default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["seconds", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyLatentAudio", "display_name": "Empty Latent Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": "Audio", "has_intermediate_output": false}, "VAEEncodeAudio": {"input": {"required": {"audio": ["AUDIO", {}], "vae": ["VAE", {}]}}, "input_order": {"required": ["audio", "vae"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "VAEEncodeAudio", "display_name": "VAE Encode Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["audio to latent"], "essentials_category": null, "has_intermediate_output": false}, "VAEDecodeAudio": {"input": {"required": {"samples": ["LATENT", {}], "vae": ["VAE", {}]}}, "input_order": {"required": ["samples", "vae"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "VAEDecodeAudio", "display_name": "VAE Decode Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["latent to audio"], "essentials_category": null, "has_intermediate_output": false}, 
"VAEDecodeAudioTiled": {"input": {"required": {"samples": ["LATENT", {}], "vae": ["VAE", {}], "tile_size": ["INT", {"default": 512, "min": 32, "max": 8192, "step": 8}], "overlap": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 8}]}}, "input_order": {"required": ["samples", "vae", "tile_size", "overlap"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "VAEDecodeAudioTiled", "display_name": "VAE Decode Audio (Tiled)", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["latent to audio"], "essentials_category": null, "has_intermediate_output": false}, "SaveAudio": {"input": {"required": {"audio": ["AUDIO", {}], "filename_prefix": ["STRING", {"default": "audio/ComfyUI", "multiline": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["audio", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveAudio", "display_name": "Save Audio (FLAC)", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export flac"], "essentials_category": "Audio", "has_intermediate_output": false}, "SaveAudioMP3": {"input": {"required": {"audio": ["AUDIO", {}], "filename_prefix": ["STRING", {"default": "audio/ComfyUI", "multiline": false}], "quality": ["COMBO", {"default": "V0", "multiselect": false, "options": ["V0", "128k", "320k"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, 
"input_order": {"required": ["audio", "filename_prefix", "quality"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveAudioMP3", "display_name": "Save Audio (MP3)", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export mp3"], "essentials_category": "Audio", "has_intermediate_output": false}, "SaveAudioOpus": {"input": {"required": {"audio": ["AUDIO", {}], "filename_prefix": ["STRING", {"default": "audio/ComfyUI", "multiline": false}], "quality": ["COMBO", {"default": "128k", "multiselect": false, "options": ["64k", "96k", "128k", "192k", "320k"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["audio", "filename_prefix", "quality"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveAudioOpus", "display_name": "Save Audio (Opus)", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export opus"], "essentials_category": null, "has_intermediate_output": false}, "LoadAudio": {"input": {"required": {"audio": ["COMBO", {"multiselect": false, "options": [], "audio_upload": true}]}}, "input_order": {"required": ["audio"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LoadAudio", "display_name": "Load Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", 
"output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["import audio", "open audio", "audio file"], "essentials_category": "Audio", "has_intermediate_output": false}, "PreviewAudio": {"input": {"required": {"audio": ["AUDIO", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["audio"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "PreviewAudio", "display_name": "Preview Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["play audio"], "essentials_category": null, "has_intermediate_output": false}, "ConditioningStableAudio": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "seconds_start": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.1}], "seconds_total": ["FLOAT", {"default": 47.0, "min": 0.0, "max": 1000.0, "step": 0.1}]}}, "input_order": {"required": ["positive", "negative", "seconds_start", "seconds_total"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "ConditioningStableAudio", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_audio", "category": "conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecordAudio": {"input": {"required": {"audio": ["AUDIO_RECORD", {}]}}, "input_order": 
{"required": ["audio"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecordAudio", "display_name": "Record Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["microphone input", "audio capture", "voice input"], "essentials_category": null, "has_intermediate_output": false}, "TrimAudioDuration": {"input": {"required": {"audio": ["AUDIO", {}], "start_index": ["FLOAT", {"tooltip": "Start time in seconds, can be negative to count from the end (supports sub-seconds).", "default": 0.0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 0.01}], "duration": ["FLOAT", {"tooltip": "Duration in seconds", "default": 60.0, "min": 0.0, "step": 0.01}]}}, "input_order": {"required": ["audio", "start_index", "duration"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "TrimAudioDuration", "display_name": "Trim Audio Duration", "description": "Trim audio tensor into chosen time range.", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["cut audio", "audio clip", "shorten audio"], "essentials_category": null, "has_intermediate_output": false}, "SplitAudioChannels": {"input": {"required": {"audio": ["AUDIO", {}]}}, "input_order": {"required": ["audio"]}, "is_input_list": false, "output": ["AUDIO", "AUDIO"], "output_is_list": [false, false], "output_name": ["left", "right"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "SplitAudioChannels", "display_name": "Split Audio Channels", 
"description": "Separates the audio into left and right channels.", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["stereo to mono"], "essentials_category": null, "has_intermediate_output": false}, "JoinAudioChannels": {"input": {"required": {"audio_left": ["AUDIO", {}], "audio_right": ["AUDIO", {}]}}, "input_order": {"required": ["audio_left", "audio_right"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["audio"], "output_tooltips": [null], "output_matchtypes": null, "name": "JoinAudioChannels", "display_name": "Join Audio Channels", "description": "Joins left and right mono audio channels into a stereo audio.", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AudioConcat": {"input": {"required": {"audio1": ["AUDIO", {}], "audio2": ["AUDIO", {}], "direction": ["COMBO", {"tooltip": "Whether to append audio2 after or before audio1.", "default": "after", "multiselect": false, "options": ["after", "before"]}]}}, "input_order": {"required": ["audio1", "audio2", "direction"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioConcat", "display_name": "Audio Concat", "description": "Concatenates the audio1 to audio2 in the specified direction.", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["join audio", "combine audio", "append audio"], "essentials_category": null, 
"has_intermediate_output": false}, "AudioMerge": {"input": {"required": {"audio1": ["AUDIO", {}], "audio2": ["AUDIO", {}], "merge_method": ["COMBO", {"tooltip": "The method used to combine the audio waveforms.", "multiselect": false, "options": ["add", "mean", "subtract", "multiply"]}]}}, "input_order": {"required": ["audio1", "audio2", "merge_method"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioMerge", "display_name": "Audio Merge", "description": "Combine two audio tracks by overlaying their waveforms.", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["mix audio", "overlay audio", "layer audio"], "essentials_category": null, "has_intermediate_output": false}, "AudioAdjustVolume": {"input": {"required": {"audio": ["AUDIO", {}], "volume": ["INT", {"tooltip": "Volume adjustment in decibels (dB). 
0 = no change, +6 = double, -6 = half, etc", "default": 1, "min": -100, "max": 100}]}}, "input_order": {"required": ["audio", "volume"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioAdjustVolume", "display_name": "Audio Adjust Volume", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["audio gain", "loudness", "audio level"], "essentials_category": null, "has_intermediate_output": false}, "EmptyAudio": {"input": {"required": {"duration": ["FLOAT", {"tooltip": "Duration of the empty audio clip in seconds", "default": 60.0, "min": 0.0, "max": 18446744073709551615, "step": 0.01}], "sample_rate": ["INT", {"tooltip": "Sample rate of the empty audio clip.", "advanced": true, "default": 44100, "min": 1, "max": 192000}], "channels": ["INT", {"tooltip": "Number of audio channels (1 for mono, 2 for stereo).", "advanced": true, "default": 2, "min": 1, "max": 2}]}}, "input_order": {"required": ["duration", "sample_rate", "channels"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyAudio", "display_name": "Empty Audio", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["blank audio"], "essentials_category": null, "has_intermediate_output": false}, "AudioEqualizer3Band": {"input": {"required": {"audio": ["AUDIO", {}], "low_gain_dB": ["FLOAT", {"tooltip": "Gain for Low frequencies (Bass)", "default": 0.0, "min": -24.0, "max": 24.0, "step": 0.1}], "low_freq": ["INT", {"tooltip": "Cutoff 
frequency for Low shelf", "default": 100, "min": 20, "max": 500}], "mid_gain_dB": ["FLOAT", {"tooltip": "Gain for Mid frequencies", "default": 0.0, "min": -24.0, "max": 24.0, "step": 0.1}], "mid_freq": ["INT", {"tooltip": "Center frequency for Mids", "default": 1000, "min": 200, "max": 4000}], "mid_q": ["FLOAT", {"tooltip": "Q factor (bandwidth) for Mids", "default": 0.707, "min": 0.1, "max": 10.0, "step": 0.1}], "high_gain_dB": ["FLOAT", {"tooltip": "Gain for High frequencies (Treble)", "default": 0.0, "min": -24.0, "max": 24.0, "step": 0.1}], "high_freq": ["INT", {"tooltip": "Cutoff frequency for High shelf", "default": 5000, "min": 1000, "max": 15000}]}}, "input_order": {"required": ["audio", "low_gain_dB", "low_freq", "mid_gain_dB", "mid_freq", "mid_q", "high_gain_dB", "high_freq"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioEqualizer3Band", "display_name": "Audio Equalizer (3-Band)", "description": "", "python_module": "comfy_extras.nodes_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["eq", "bass boost", "treble boost", "equalizer"], "essentials_category": null, "has_intermediate_output": false}, "TripleCLIPLoader": {"input": {"required": {"clip_name1": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", 
"gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "clip_name2": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", 
"ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", 
"t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "clip_name3": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", 
"qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}]}}, "input_order": {"required": ["clip_name1", "clip_name2", "clip_name3"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "output_tooltips": [null], "output_matchtypes": null, "name": "TripleCLIPLoader", "display_name": null, "description": "[Recipes]\n\nsd3: clip-l, clip-g, t5", "python_module": "comfy_extras.nodes_sd3", "category": "advanced/loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptySD3LatentImage": {"input": {"required": {"width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptySD3LatentImage", "display_name": null, "description": "", "python_module": 
"comfy_extras.nodes_sd3", "category": "latent/sd3", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeSD3": {"input": {"required": {"clip": ["CLIP", {}], "clip_l": ["STRING", {"multiline": true, "dynamicPrompts": true}], "clip_g": ["STRING", {"multiline": true, "dynamicPrompts": true}], "t5xxl": ["STRING", {"multiline": true, "dynamicPrompts": true}], "empty_padding": ["COMBO", {"advanced": true, "multiselect": false, "options": ["none", "empty_prompt"]}]}}, "input_order": {"required": ["clip", "clip_l", "clip_g", "t5xxl", "empty_padding"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeSD3", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_sd3", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["sd3 prompt"], "essentials_category": null, "has_intermediate_output": false}, "ControlNetApplySD3": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "control_net": ["CONTROL_NET", {}], "vae": ["VAE", {}], "image": ["IMAGE", {}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["positive", "negative", "control_net", "vae", "image", "strength", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], 
"output_tooltips": [null, null], "output_matchtypes": null, "name": "ControlNetApplySD3", "display_name": "Apply Controlnet with VAE", "description": "", "python_module": "comfy_extras.nodes_sd3", "category": "conditioning/controlnet", "output_node": false, "deprecated": true, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SkipLayerGuidanceSD3": {"input": {"required": {"model": ["MODEL", {}], "layers": ["STRING", {"advanced": true, "default": "7, 8, 9", "multiline": false}], "scale": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}], "start_percent": ["FLOAT", {"advanced": true, "default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"advanced": true, "default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["model", "layers", "scale", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "SkipLayerGuidanceSD3", "display_name": null, "description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.", "python_module": "comfy_extras.nodes_sd3", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GITSScheduler": {"input": {"required": {"coeff": ["FLOAT", {"advanced": true, "default": 1.2, "min": 0.8, "max": 1.5, "step": 0.05}], "steps": ["INT", {"default": 10, "min": 2, "max": 1000}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["coeff", "steps", "denoise"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": 
["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "GITSScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_gits", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SetUnionControlNetType": {"input": {"required": {"control_net": ["CONTROL_NET", {}], "type": ["COMBO", {"multiselect": false, "options": ["auto", "openpose", "depth", "hed/pidi/scribble/ted", "canny/lineart/anime_lineart/mlsd", "normal", "segment", "tile", "repaint"]}]}}, "input_order": {"required": ["control_net", "type"]}, "is_input_list": false, "output": ["CONTROL_NET"], "output_is_list": [false], "output_name": ["CONTROL_NET"], "output_tooltips": [null], "output_matchtypes": null, "name": "SetUnionControlNetType", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_controlnet", "category": "conditioning/controlnet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ControlNetInpaintingAliMamaApply": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "control_net": ["CONTROL_NET", {}], "vae": ["VAE", {}], "image": ["IMAGE", {}], "mask": ["MASK", {}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["positive", "negative", "control_net", "vae", "image", "mask", "strength", "start_percent", "end_percent"]}, "is_input_list": false, "output": 
["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "ControlNetInpaintingAliMamaApply", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_controlnet", "category": "conditioning/controlnet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["masked controlnet"], "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeHunyuanDiT": {"input": {"required": {"clip": ["CLIP", {}], "bert": ["STRING", {"multiline": true, "dynamicPrompts": true}], "mt5xl": ["STRING", {"multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["clip", "bert", "mt5xl"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeHunyuanDiT", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextEncodeHunyuanVideo_ImageToVideo": {"input": {"required": {"clip": ["CLIP", {}], "clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}], "image_interleave": ["INT", {"tooltip": "How much the image influences things vs the text prompt. 
Higher number means more influence from the text prompt.", "advanced": true, "default": 2, "min": 1, "max": 512}]}}, "input_order": {"required": ["clip", "clip_vision_output", "prompt", "image_interleave"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeHunyuanVideo_ImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyHunyuanLatentVideo": {"input": {"required": {"width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 25, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "length", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyHunyuanLatentVideo", "display_name": "Empty HunyuanVideo 1.0 Latent", "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "latent/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyHunyuanVideo15Latent": {"input": {"required": {"width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 25, "min": 1, "max": 16384, "step": 4}], "batch_size": 
["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "length", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyHunyuanVideo15Latent", "display_name": "Empty HunyuanVideo 1.5 Latent", "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "latent/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HunyuanVideo15ImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 33, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"start_image": ["IMAGE", {}], "clip_vision_output": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["start_image", "clip_vision_output"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "HunyuanVideo15ImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HunyuanVideo15SuperResolution": 
{"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "latent": ["LATENT", {}], "noise_augmentation": ["FLOAT", {"advanced": true, "default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"vae": ["VAE", {}], "start_image": ["IMAGE", {}], "clip_vision_output": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": ["positive", "negative", "latent", "noise_augmentation"], "optional": ["vae", "start_image", "clip_vision_output"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "HunyuanVideo15SuperResolution", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "sd", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HunyuanVideo15LatentUpscaleWithModel": {"input": {"required": {"model": ["LATENT_UPSCALE_MODEL", {}], "samples": ["LATENT", {}], "upscale_method": ["COMBO", {"default": "bilinear", "multiselect": false, "options": ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]}], "width": ["INT", {"default": 1280, "min": 0, "max": 16384, "step": 8}], "height": ["INT", {"default": 720, "min": 0, "max": 16384, "step": 8}], "crop": ["COMBO", {"multiselect": false, "options": ["disabled", "center"]}]}}, "input_order": {"required": ["model", "samples", "upscale_method", "width", "height", "crop"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "HunyuanVideo15LatentUpscaleWithModel", "display_name": "Hunyuan Video 15 Latent Upscale With Model", "description": "", "python_module": 
"comfy_extras.nodes_hunyuan", "category": "latent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LatentUpscaleModelLoader": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["hunyuanvideo15_latent_upsampler_1080p.safetensors", "hunyuanvideo15_latent_upsampler_720p.safetensors"]}]}}, "input_order": {"required": ["model_name"]}, "is_input_list": false, "output": ["LATENT_UPSCALE_MODEL"], "output_is_list": [false], "output_name": ["LATENT_UPSCALE_MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "LatentUpscaleModelLoader", "display_name": "Load Latent Upscale Model", "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HunyuanImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 53, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "guidance_type": ["COMBO", {"advanced": true, "multiselect": false, "options": ["v1 (concat)", "v2 (replace)", "custom"]}]}, "optional": {"start_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "vae", "width", "height", "length", "batch_size", "guidance_type"], "optional": ["start_image"]}, "is_input_list": false, "output": ["CONDITIONING", "LATENT"], "output_is_list": [false, false], "output_name": ["positive", "latent"], "output_tooltips": [null, null], "output_matchtypes": null, "name": 
"HunyuanImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyHunyuanImageLatent": {"input": {"required": {"width": ["INT", {"default": 2048, "min": 64, "max": 16384, "step": 32}], "height": ["INT", {"default": 2048, "min": 64, "max": 16384, "step": 32}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyHunyuanImageLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "latent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HunyuanRefinerLatent": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "latent": ["LATENT", {}], "noise_augmentation": ["FLOAT", {"advanced": true, "default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["positive", "negative", "latent", "noise_augmentation"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "HunyuanRefinerLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan", "category": "sd", "output_node": false, "deprecated": false, "experimental": false, "dev_only": 
false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Epsilon Scaling": {"input": {"required": {"model": ["MODEL", {}], "scaling_factor": ["FLOAT", {"advanced": true, "default": 1.005, "min": 0.5, "max": 1.5, "step": 0.001, "display": "number"}]}}, "input_order": {"required": ["model", "scaling_factor"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "Epsilon Scaling", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_eps", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TemporalScoreRescaling": {"input": {"required": {"model": ["MODEL", {}], "tsr_k": ["FLOAT", {"tooltip": "Controls the rescaling strength.\nLower k produces more detailed results; higher k produces smoother results in image generation. 
Setting k = 1 disables rescaling.", "advanced": true, "default": 0.95, "min": 0.01, "max": 100.0, "step": 0.001, "display": "number"}], "tsr_sigma": ["FLOAT", {"tooltip": "Controls how early rescaling takes effect.\nLarger values take effect earlier.", "advanced": true, "default": 1.0, "min": 0.01, "max": 100.0, "step": 0.001, "display": "number"}]}}, "input_order": {"required": ["model", "tsr_k", "tsr_sigma"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["patched_model"], "output_tooltips": [null], "output_matchtypes": null, "name": "TemporalScoreRescaling", "display_name": "TSR - Temporal Score Rescaling", "description": "[Post-CFG Function]\nTSR - Temporal Score Rescaling (2510.01184)\n\nRescaling the model's score or noise to steer the sampling diversity.\n", "python_module": "comfy_extras.nodes_eps", "category": "model_patches/unet", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeFlux": {"input": {"required": {"clip": ["CLIP", {}], "clip_l": ["STRING", {"multiline": true, "dynamicPrompts": true}], "t5xxl": ["STRING", {"multiline": true, "dynamicPrompts": true}], "guidance": ["FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}]}}, "input_order": {"required": ["clip", "clip_l", "t5xxl", "guidance"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeFlux", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_flux", "category": "advanced/conditioning/flux", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"FluxGuidance": {"input": {"required": {"conditioning": ["CONDITIONING", {}], "guidance": ["FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}]}}, "input_order": {"required": ["conditioning", "guidance"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxGuidance", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_flux", "category": "advanced/conditioning/flux", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxDisableGuidance": {"input": {"required": {"conditioning": ["CONDITIONING", {}]}}, "input_order": {"required": ["conditioning"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxDisableGuidance", "display_name": null, "description": "This node completely disables the guidance embed on Flux and Flux like models", "python_module": "comfy_extras.nodes_flux", "category": "advanced/conditioning/flux", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxKontextImageScale": {"input": {"required": {"image": ["IMAGE", {}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxKontextImageScale", "display_name": null, "description": "This node resizes the image to one that is more optimal for flux kontext.", "python_module": "comfy_extras.nodes_flux", "category": "advanced/conditioning/flux", 
"output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxKontextMultiReferenceLatentMethod": {"input": {"required": {"conditioning": ["CONDITIONING", {}], "reference_latents_method": ["COMBO", {"advanced": true, "multiselect": false, "options": ["offset", "index", "uxo/uno", "index_timestep_zero"]}]}}, "input_order": {"required": ["conditioning", "reference_latents_method"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxKontextMultiReferenceLatentMethod", "display_name": "Edit Model Reference Method", "description": "", "python_module": "comfy_extras.nodes_flux", "category": "advanced/conditioning/flux", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyFlux2LatentImage": {"input": {"required": {"width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyFlux2LatentImage", "display_name": "Empty Flux 2 Latent", "description": "", "python_module": "comfy_extras.nodes_flux", "category": "latent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Flux2Scheduler": {"input": 
{"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 4096}], "width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 1}], "height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 1}]}}, "input_order": {"required": ["steps", "width", "height"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "Flux2Scheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_flux", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxKVCache": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to use KV Cache on."}]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The patched model with KV Cache enabled."], "output_matchtypes": null, "name": "FluxKVCache", "display_name": "Flux KV Cache", "description": "Enables KV Cache optimization for reference images on Flux family models.", "python_module": "comfy_extras.nodes_flux", "category": "", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LoraSave": {"input": {"required": {"filename_prefix": ["STRING", {"default": "loras/ComfyUI_extracted_lora", "multiline": false}], "rank": ["INT", {"advanced": true, "default": 8, "min": 1, "max": 4096, "step": 1}], "lora_type": ["COMBO", {"advanced": true, "multiselect": false, "options": ["standard", "full_diff"]}], "bias_diff": ["BOOLEAN", {"advanced": true, "default": true}]}, "optional": {"model_diff": ["MODEL", {"tooltip": 
"The ModelSubtract output to be converted to a lora."}], "text_encoder_diff": ["CLIP", {"tooltip": "The CLIPSubtract output to be converted to a lora."}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["filename_prefix", "rank", "lora_type", "bias_diff"], "optional": ["model_diff", "text_encoder_diff"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "LoraSave", "display_name": "Extract and Save Lora", "description": "", "python_module": "comfy_extras.nodes_lora_extract", "category": "_for_testing", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export lora"], "essentials_category": null, "has_intermediate_output": false}, "TorchCompileModel": {"input": {"required": {"model": ["MODEL", {}], "backend": ["COMBO", {"advanced": true, "multiselect": false, "options": ["inductor", "cudagraphs"]}]}}, "input_order": {"required": ["model", "backend"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "TorchCompileModel", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_torch_compile", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyMochiLatentVideo": {"input": {"required": {"width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 25, "min": 7, "max": 16384, "step": 6}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": 
{"required": ["width", "height", "length", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyMochiLatentVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_mochi", "category": "latent/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SkipLayerGuidanceDiT": {"input": {"required": {"model": ["MODEL", {}], "double_layers": ["STRING", {"advanced": true, "default": "7, 8, 9", "multiline": false}], "single_layers": ["STRING", {"advanced": true, "default": "7, 8, 9", "multiline": false}], "scale": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}], "start_percent": ["FLOAT", {"advanced": true, "default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"advanced": true, "default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}], "rescaling_scale": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "double_layers", "single_layers", "scale", "start_percent", "end_percent", "rescaling_scale"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "SkipLayerGuidanceDiT", "display_name": null, "description": "Generic version of SkipLayerGuidance node that can be used on every DiT model.", "python_module": "comfy_extras.nodes_slg", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SkipLayerGuidanceDiTSimple": {"input": {"required": {"model": ["MODEL", 
{}], "double_layers": ["STRING", {"advanced": true, "default": "7, 8, 9", "multiline": false}], "single_layers": ["STRING", {"advanced": true, "default": "7, 8, 9", "multiline": false}], "start_percent": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["model", "double_layers", "single_layers", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "SkipLayerGuidanceDiTSimple", "display_name": null, "description": "Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.", "python_module": "comfy_extras.nodes_slg", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Mahiro": {"input": {"required": {"model": ["MODEL", {}]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["patched_model"], "output_tooltips": [null], "output_matchtypes": null, "name": "Mahiro", "display_name": "Positive-Biased Guidance", "description": "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.", "python_module": "comfy_extras.nodes_mahiro", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["mahiro", "mahiro cfg", "similarity-adaptive guidance", "positive-biased cfg"], "essentials_category": null, "has_intermediate_output": false}, "LTXVLatentUpsampler": {"input": {"required": {"samples": 
["LATENT"], "upscale_model": ["LATENT_UPSCALE_MODEL"], "vae": ["VAE"]}}, "input_order": {"required": ["samples", "upscale_model", "vae"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LTXVLatentUpsampler", "display_name": "LTXVLatentUpsampler", "description": "", "python_module": "comfy_extras.nodes_lt_upsampler", "category": "latent/video", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "LTXVAudioVAELoader": {"input": {"required": {"ckpt_name": ["COMBO", {"tooltip": "Audio VAE checkpoint to load.", "multiselect": false, "options": ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", 
"hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", 
"sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", 
"sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]}]}}, "input_order": {"required": ["ckpt_name"]}, "is_input_list": false, "output": ["VAE"], "output_is_list": [false], "output_name": ["Audio VAE"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVAudioVAELoader", "display_name": "LTXV Audio VAE Loader", "description": "", "python_module": "comfy_extras.nodes_lt_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVAudioVAEEncode": {"input": {"required": {"audio": ["AUDIO", {"tooltip": "The 
audio to be encoded."}], "audio_vae": ["VAE", {"display_name": "Audio VAE", "tooltip": "The Audio VAE model to use for encoding."}]}}, "input_order": {"required": ["audio", "audio_vae"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["Audio Latent"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVAudioVAEEncode", "display_name": "LTXV Audio VAE Encode", "description": "", "python_module": "comfy_extras.nodes_lt_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVAudioVAEDecode": {"input": {"required": {"samples": ["LATENT", {"tooltip": "The latent to be decoded."}], "audio_vae": ["VAE", {"display_name": "Audio VAE", "tooltip": "The Audio VAE model used for decoding the latent."}]}}, "input_order": {"required": ["samples", "audio_vae"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["Audio"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVAudioVAEDecode", "display_name": "LTXV Audio VAE Decode", "description": "", "python_module": "comfy_extras.nodes_lt_audio", "category": "audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVEmptyLatentAudio": {"input": {"required": {"frames_number": ["INT", {"tooltip": "Number of frames.", "default": 97, "min": 1, "max": 1000, "step": 1, "display": "number"}], "frame_rate": ["INT", {"tooltip": "Number of frames per second.", "default": 25, "min": 1, "max": 1000, "step": 1, "display": "number"}], "batch_size": ["INT", {"tooltip": "The number of latent audio samples in the batch.", "default": 1, "min": 1, "max": 4096, "display": "number"}], 
"audio_vae": ["VAE", {"display_name": "Audio VAE", "tooltip": "The Audio VAE model to get configuration from."}]}}, "input_order": {"required": ["frames_number", "frame_rate", "batch_size", "audio_vae"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["Latent"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVEmptyLatentAudio", "display_name": "LTXV Empty Latent Audio", "description": "", "python_module": "comfy_extras.nodes_lt_audio", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXAVTextEncoderLoader": {"input": {"required": {"text_encoder": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", 
"open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "ckpt_name": ["COMBO", {"multiselect": false, "options": ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", 
"SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", 
"epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", 
"sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", 
"sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]}], "device": ["COMBO", 
{"advanced": true, "multiselect": false, "options": ["default", "cpu"]}]}}, "input_order": {"required": ["text_encoder", "ckpt_name", "device"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXAVTextEncoderLoader", "display_name": "LTXV Audio Text Encoder Loader", "description": "[Recipes]\n\nltxav: gemma 3 12B", "python_module": "comfy_extras.nodes_lt_audio", "category": "advanced/loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyLTXVLatentVideo": {"input": {"required": {"width": ["INT", {"default": 768, "min": 64, "max": 16384, "step": 32}], "height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 32}], "length": ["INT", {"default": 97, "min": 1, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "length", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyLTXVLatentVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "latent/video/ltxv", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVImgToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "image": ["IMAGE", {}], "width": ["INT", {"default": 768, "min": 64, "max": 16384, "step": 32}], "height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 32}], "length": ["INT", {"default": 97, "min": 9, "max": 
16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}]}}, "input_order": {"required": ["positive", "negative", "vae", "image", "width", "height", "length", "batch_size", "strength"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "LTXVImgToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVImgToVideoInplace": {"input": {"required": {"vae": ["VAE", {}], "image": ["IMAGE", {}], "latent": ["LATENT", {}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}], "bypass": ["BOOLEAN", {"tooltip": "Bypass the conditioning.", "default": false}]}}, "input_order": {"required": ["vae", "image", "latent", "strength", "bypass"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["latent"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVImgToVideoInplace", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ModelSamplingLTXV": {"input": {"required": {"model": ["MODEL", {}], "max_shift": ["FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step": 0.01}], "base_shift": ["FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step": 0.01}]}, "optional": 
{"latent": ["LATENT", {}]}}, "input_order": {"required": ["model", "max_shift", "base_shift"], "optional": ["latent"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "ModelSamplingLTXV", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "advanced/model", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVConditioning": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "frame_rate": ["FLOAT", {"default": 25.0, "min": 0.0, "max": 1000.0, "step": 0.01}]}}, "input_order": {"required": ["positive", "negative", "frame_rate"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "LTXVConditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVScheduler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "max_shift": ["FLOAT", {"default": 2.05, "min": 0.0, "max": 100.0, "step": 0.01}], "base_shift": ["FLOAT", {"default": 0.95, "min": 0.0, "max": 100.0, "step": 0.01}], "stretch": ["BOOLEAN", {"tooltip": "Stretch the sigmas to be in the range [terminal, 1].", "advanced": true, "default": true}], "terminal": ["FLOAT", {"tooltip": "The terminal value of the sigmas after stretching.", "advanced": true, "default": 0.1, "min": 0.0, "max": 
0.99, "step": 0.01}]}, "optional": {"latent": ["LATENT", {}]}}, "input_order": {"required": ["steps", "max_shift", "base_shift", "stretch", "terminal"], "optional": ["latent"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVAddGuide": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "latent": ["LATENT", {}], "image": ["IMAGE", {"tooltip": "Image or video to condition the latent video on. Must be 8*n + 1 frames. If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames."}], "frame_idx": ["INT", {"tooltip": "Frame index to start the conditioning at. For single-frame images or videos with 1-8 frames, any frame_idx value is acceptable. For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded down to the nearest multiple of 8. 
Negative values are counted from the end of the video.", "default": 0, "min": -9999, "max": 9999}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["positive", "negative", "vae", "latent", "image", "frame_idx", "strength"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "LTXVAddGuide", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVPreprocess": {"input": {"required": {"image": ["IMAGE", {}], "img_compression": ["INT", {"tooltip": "Amount of compression to apply on image.", "default": 35, "min": 0, "max": 100}]}}, "input_order": {"required": ["image", "img_compression"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["output_image"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVPreprocess", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "image", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVCropGuides": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "latent": ["LATENT", {}]}}, "input_order": {"required": ["positive", "negative", "latent"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", 
"latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "LTXVCropGuides", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVConcatAVLatent": {"input": {"required": {"video_latent": ["LATENT", {}], "audio_latent": ["LATENT", {}]}}, "input_order": {"required": ["video_latent", "audio_latent"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["latent"], "output_tooltips": [null], "output_matchtypes": null, "name": "LTXVConcatAVLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lt", "category": "latent/video/ltxv", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVSeparateAVLatent": {"input": {"required": {"av_latent": ["LATENT", {}]}}, "input_order": {"required": ["av_latent"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["video_latent", "audio_latent"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "LTXVSeparateAVLatent", "display_name": null, "description": "LTXV Separate AV Latent", "python_module": "comfy_extras.nodes_lt", "category": "latent/video/ltxv", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LTXVReferenceAudio": {"input": {"required": {"model": ["MODEL", {}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "reference_audio": 
["AUDIO", {"tooltip": "Reference audio clip whose speaker identity to transfer. ~5 seconds recommended (training duration). Shorter or longer clips may degrade voice identity transfer."}], "audio_vae": ["VAE", {"display_name": "Audio VAE", "tooltip": "LTXV Audio VAE for encoding."}], "identity_guidance_scale": ["FLOAT", {"tooltip": "Strength of identity guidance. Runs an extra forward pass without reference each step to amplify speaker identity. Set to 0 to disable (no extra pass).", "default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 0.01}], "start_percent": ["FLOAT", {"tooltip": "Start of the sigma range where identity guidance is active.", "advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"tooltip": "End of the sigma range where identity guidance is active.", "advanced": true, "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["model", "positive", "negative", "reference_audio", "audio_vae", "identity_guidance_scale", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["MODEL", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false], "output_name": ["MODEL", "positive", "negative"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "LTXVReferenceAudio", "display_name": "LTXV Reference Audio (ID-LoRA)", "description": "Set reference audio for ID-LoRA speaker identity transfer. 
Encodes a reference audio clip into the conditioning and optionally patches the model with identity guidance (extra forward pass without reference, amplifying the speaker identity effect).", "python_module": "comfy_extras.nodes_lt", "category": "conditioning/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CreateHookLora": {"input": {"required": {"lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}], "strength_clip": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}]}, "optional": {"prev_hooks": ["HOOKS"]}}, "input_order": {"required": ["lora_name", "strength_model", "strength_clip"], "optional": ["prev_hooks"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CreateHookLora", "display_name": "Create Hook LoRA", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/create", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CreateHookLoraModelOnly": {"input": {"required": {"lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", 
"PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", 
"flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", 
"sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", 
"wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}]}, "optional": {"prev_hooks": ["HOOKS"]}}, "input_order": {"required": ["lora_name", "strength_model"], "optional": ["prev_hooks"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CreateHookLoraModelOnly", "display_name": "Create Hook LoRA (MO)", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/create", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CreateHookModelAsLora": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", 
"flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", 
"sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", 
"sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}], "strength_clip": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}]}, "optional": {"prev_hooks": ["HOOKS"]}}, "input_order": {"required": ["ckpt_name", "strength_model", "strength_clip"], "optional": ["prev_hooks"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CreateHookModelAsLora", "display_name": "Create Hook Model as LoRA", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/create", 
"output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CreateHookModelAsLoraModelOnly": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", 
"cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", 
"meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", 
"sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", 
"sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}]}, "optional": {"prev_hooks": ["HOOKS"]}}, "input_order": {"required": ["ckpt_name", "strength_model"], "optional": ["prev_hooks"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CreateHookModelAsLoraModelOnly", "display_name": "Create Hook Model as LoRA (MO)", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/create", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "SetHookKeyframes": {"input": {"required": {"hooks": ["HOOKS"]}, "optional": {"hook_kf": ["HOOK_KEYFRAMES"]}}, "input_order": {"required": ["hooks"], "optional": ["hook_kf"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "SetHookKeyframes", "display_name": "Set Hook Keyframes", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/scheduling", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CreateHookKeyframe": {"input": {"required": {"strength_mult": ["FLOAT", 
{"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"prev_hook_kf": ["HOOK_KEYFRAMES"]}}, "input_order": {"required": ["strength_mult", "start_percent"], "optional": ["prev_hook_kf"]}, "is_input_list": false, "output": ["HOOK_KEYFRAMES"], "output_is_list": [false], "output_name": ["HOOK_KF"], "name": "CreateHookKeyframe", "display_name": "Create Hook Keyframe", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/scheduling", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["hook scheduling", "strength animation", "timed hook"]}, "CreateHookKeyframesInterpolated": {"input": {"required": {"strength_start": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "strength_end": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out"]], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "keyframes_count": ["INT", {"default": 5, "min": 2, "max": 100, "step": 1}], "print_keyframes": ["BOOLEAN", {"default": false, "advanced": true}]}, "optional": {"prev_hook_kf": ["HOOK_KEYFRAMES"]}}, "input_order": {"required": ["strength_start", "strength_end", "interpolation", "start_percent", "end_percent", "keyframes_count", "print_keyframes"], "optional": ["prev_hook_kf"]}, "is_input_list": false, "output": ["HOOK_KEYFRAMES"], "output_is_list": [false], "output_name": ["HOOK_KF"], "name": "CreateHookKeyframesInterpolated", "display_name": "Create Hook Keyframes Interp.", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/scheduling", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["ease hook 
strength", "smooth hook transition", "interpolate keyframes"]}, "CreateHookKeyframesFromFloats": {"input": {"required": {"floats_strength": ["FLOATS", {"default": -1, "min": -1, "step": 0.001, "forceInput": true}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "print_keyframes": ["BOOLEAN", {"default": false, "advanced": true}]}, "optional": {"prev_hook_kf": ["HOOK_KEYFRAMES"]}}, "input_order": {"required": ["floats_strength", "start_percent", "end_percent", "print_keyframes"], "optional": ["prev_hook_kf"]}, "is_input_list": false, "output": ["HOOK_KEYFRAMES"], "output_is_list": [false], "output_name": ["HOOK_KF"], "name": "CreateHookKeyframesFromFloats", "display_name": "Create Hook Keyframes From Floats", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/scheduling", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["batch keyframes", "strength list to keyframes"]}, "CombineHooks2": {"input": {"required": {}, "optional": {"hooks_A": ["HOOKS"], "hooks_B": ["HOOKS"]}}, "input_order": {"required": [], "optional": ["hooks_A", "hooks_B"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CombineHooks2", "display_name": "Combine Hooks [2]", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/combine", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["merge hooks"]}, "CombineHooks4": {"input": {"required": {}, "optional": {"hooks_A": ["HOOKS"], "hooks_B": ["HOOKS"], "hooks_C": ["HOOKS"], "hooks_D": ["HOOKS"]}}, "input_order": {"required": [], "optional": ["hooks_A", "hooks_B", "hooks_C", "hooks_D"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": 
"CombineHooks4", "display_name": "Combine Hooks [4]", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/combine", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CombineHooks8": {"input": {"required": {}, "optional": {"hooks_A": ["HOOKS"], "hooks_B": ["HOOKS"], "hooks_C": ["HOOKS"], "hooks_D": ["HOOKS"], "hooks_E": ["HOOKS"], "hooks_F": ["HOOKS"], "hooks_G": ["HOOKS"], "hooks_H": ["HOOKS"]}}, "input_order": {"required": [], "optional": ["hooks_A", "hooks_B", "hooks_C", "hooks_D", "hooks_E", "hooks_F", "hooks_G", "hooks_H"]}, "is_input_list": false, "output": ["HOOKS"], "output_is_list": [false], "output_name": ["HOOKS"], "name": "CombineHooks8", "display_name": "Combine Hooks [8]", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/combine", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ConditioningSetProperties": {"input": {"required": {"cond_NEW": ["CONDITIONING"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}, "optional": {"mask": ["MASK"], "hooks": ["HOOKS"], "timesteps": ["TIMESTEPS_RANGE"]}}, "input_order": {"required": ["cond_NEW", "strength", "set_cond_area"], "optional": ["mask", "hooks", "timesteps"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetProperties", "display_name": "Cond Set Props", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond single", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ConditioningSetPropertiesAndCombine": {"input": {"required": {"cond": ["CONDITIONING"], "cond_NEW": ["CONDITIONING"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], 
"set_cond_area": [["default", "mask bounds"]]}, "optional": {"mask": ["MASK"], "hooks": ["HOOKS"], "timesteps": ["TIMESTEPS_RANGE"]}}, "input_order": {"required": ["cond", "cond_NEW", "strength", "set_cond_area"], "optional": ["mask", "hooks", "timesteps"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetPropertiesAndCombine", "display_name": "Cond Set Props Combine", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond single", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "PairConditioningSetProperties": {"input": {"required": {"positive_NEW": ["CONDITIONING"], "negative_NEW": ["CONDITIONING"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}, "optional": {"mask": ["MASK"], "hooks": ["HOOKS"], "timesteps": ["TIMESTEPS_RANGE"]}}, "input_order": {"required": ["positive_NEW", "negative_NEW", "strength", "set_cond_area"], "optional": ["mask", "hooks", "timesteps"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "PairConditioningSetProperties", "display_name": "Cond Pair Set Props", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond pair", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "PairConditioningSetPropertiesAndCombine": {"input": {"required": {"positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "positive_NEW": ["CONDITIONING"], "negative_NEW": ["CONDITIONING"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}, "optional": {"mask": ["MASK"], "hooks": ["HOOKS"], "timesteps": ["TIMESTEPS_RANGE"]}}, "input_order": 
{"required": ["positive", "negative", "positive_NEW", "negative_NEW", "strength", "set_cond_area"], "optional": ["mask", "hooks", "timesteps"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "PairConditioningSetPropertiesAndCombine", "display_name": "Cond Pair Set Props Combine", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond pair", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ConditioningSetDefaultCombine": {"input": {"required": {"cond": ["CONDITIONING"], "cond_DEFAULT": ["CONDITIONING"]}, "optional": {"hooks": ["HOOKS"]}}, "input_order": {"required": ["cond", "cond_DEFAULT"], "optional": ["hooks"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ConditioningSetDefaultCombine", "display_name": "Cond Set Default Combine", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond single", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "PairConditioningSetDefaultCombine": {"input": {"required": {"positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "positive_DEFAULT": ["CONDITIONING"], "negative_DEFAULT": ["CONDITIONING"]}, "optional": {"hooks": ["HOOKS"]}}, "input_order": {"required": ["positive", "negative", "positive_DEFAULT", "negative_DEFAULT"], "optional": ["hooks"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "PairConditioningSetDefaultCombine", "display_name": "Cond Pair Set Default Combine", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond pair", "output_node": false, "has_intermediate_output": false, "experimental": true, 
"search_aliases": []}, "PairConditioningCombine": {"input": {"required": {"positive_A": ["CONDITIONING"], "negative_A": ["CONDITIONING"], "positive_B": ["CONDITIONING"], "negative_B": ["CONDITIONING"]}}, "input_order": {"required": ["positive_A", "negative_A", "positive_B", "negative_B"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "PairConditioningCombine", "display_name": "Cond Pair Combine", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/cond pair", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "SetClipHooks": {"input": {"required": {"clip": ["CLIP"], "apply_to_conds": ["BOOLEAN", {"default": true, "advanced": true}], "schedule_clip": ["BOOLEAN", {"default": false, "advanced": true}]}, "optional": {"hooks": ["HOOKS"]}}, "input_order": {"required": ["clip", "apply_to_conds", "schedule_clip"], "optional": ["hooks"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "SetClipHooks", "display_name": "Set CLIP Hooks", "description": "", "python_module": "comfy_extras.nodes_hooks", "category": "advanced/hooks/clip", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ConditioningTimestepsRange": {"input": {"required": {"start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["start_percent", "end_percent"]}, "is_input_list": false, "output": ["TIMESTEPS_RANGE", "TIMESTEPS_RANGE", "TIMESTEPS_RANGE"], "output_is_list": [false, false, false], "output_name": ["TIMESTEPS_RANGE", "BEFORE_RANGE", "AFTER_RANGE"], "name": "ConditioningTimestepsRange", "display_name": "Timesteps Range", "description": "", "python_module": 
"comfy_extras.nodes_hooks", "category": "advanced/hooks", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": ["prompt scheduling", "timestep segments", "conditioning phases"]}, "Load3D": {"input": {"required": {"model_file": ["COMBO", {"multiselect": false, "options": [], "file_upload": true}], "image": ["LOAD_3D", {}], "width": ["INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}], "height": ["INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["model_file", "image", "width", "height"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", "VIDEO", "FILE_3D"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["image", "mask", "mesh_path", "normal", "camera_info", "recording_video", "model_3d"], "output_tooltips": [null, null, null, null, null, null, null], "output_matchtypes": null, "name": "Load3D", "display_name": "Load 3D & Animation", "description": "", "python_module": "comfy_extras.nodes_load_3d", "category": "3d", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": "Basics", "has_intermediate_output": false}, "Preview3D": {"input": {"required": {"model_file": ["STRING,FILE_3D_GLB,FILE_3D_GLTF,FILE_3D_FBX,FILE_3D_OBJ,FILE_3D_STL,FILE_3D_USDZ,FILE_3D", {"default": "", "widgetType": "STRING", "multiline": false, "tooltip": "3D model file or path string"}]}, "optional": {"camera_info": ["LOAD3D_CAMERA", {"advanced": true}], "bg_image": ["IMAGE", {"advanced": true}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model_file"], "optional": ["camera_info", "bg_image"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, 
"name": "Preview3D", "display_name": "Preview 3D & Animation", "description": "", "python_module": "comfy_extras.nodes_load_3d", "category": "3d", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["view mesh", "3d viewer"], "essentials_category": null, "has_intermediate_output": false}, "EmptyCosmosLatentVideo": {"input": {"required": {"width": ["INT", {"default": 1280, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 704, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 121, "min": 1, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "length", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyCosmosLatentVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cosmos", "category": "latent/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CosmosImageToVideoLatent": {"input": {"required": {"vae": ["VAE", {}], "width": ["INT", {"default": 1280, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 704, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 121, "min": 1, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"start_image": ["IMAGE", {}], "end_image": ["IMAGE", {}]}}, "input_order": {"required": ["vae", "width", "height", "length", "batch_size"], "optional": ["start_image", "end_image"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], 
"output_matchtypes": null, "name": "CosmosImageToVideoLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cosmos", "category": "conditioning/inpaint", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CosmosPredict2ImageToVideoLatent": {"input": {"required": {"vae": ["VAE", {}], "width": ["INT", {"default": 848, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 93, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"start_image": ["IMAGE", {}], "end_image": ["IMAGE", {}]}}, "input_order": {"required": ["vae", "width", "height", "length", "batch_size"], "optional": ["start_image", "end_image"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "CosmosPredict2ImageToVideoLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cosmos", "category": "conditioning/inpaint", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveWEBM": {"input": {"required": {"images": ["IMAGE", {}], "filename_prefix": ["STRING", {"default": "ComfyUI", "multiline": false}], "codec": ["COMBO", {"multiselect": false, "options": ["vp9", "av1"]}], "fps": ["FLOAT", {"default": 24.0, "min": 0.01, "max": 1000.0, "step": 0.01}], "crf": ["FLOAT", {"tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality with a larger file size.", "default": 32.0, "min": 0, "max": 63.0, "step": 1}]}, "hidden": {"prompt": ["PROMPT"], 
"extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["images", "filename_prefix", "codec", "fps", "crf"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveWEBM", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export webm"], "essentials_category": null, "has_intermediate_output": false}, "SaveVideo": {"input": {"required": {"video": ["VIDEO", {"tooltip": "The video to save."}], "filename_prefix": ["STRING", {"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes.", "default": "video/ComfyUI", "multiline": false}], "format": ["COMBO", {"tooltip": "The format to save the video as.", "default": "auto", "multiselect": false, "options": ["auto", "mp4"]}], "codec": ["COMBO", {"tooltip": "The codec to use for the video.", "default": "auto", "multiselect": false, "options": ["auto", "h264"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["video", "filename_prefix", "format", "codec"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveVideo", "display_name": "Save Video", "description": "Saves the input video to your ComfyUI output directory.", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export video"], "essentials_category": "Basics", 
"has_intermediate_output": false}, "CreateVideo": {"input": {"required": {"images": ["IMAGE", {"tooltip": "The images to create a video from."}], "fps": ["FLOAT", {"default": 30.0, "min": 1.0, "max": 120.0, "step": 1.0}]}, "optional": {"audio": ["AUDIO", {"tooltip": "The audio to add to the video."}]}}, "input_order": {"required": ["images", "fps"], "optional": ["audio"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "CreateVideo", "display_name": "Create Video", "description": "Create a video from images.", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["images to video"], "essentials_category": null, "has_intermediate_output": false}, "GetVideoComponents": {"input": {"required": {"video": ["VIDEO", {"tooltip": "The video to extract components from."}]}}, "input_order": {"required": ["video"]}, "is_input_list": false, "output": ["IMAGE", "AUDIO", "FLOAT"], "output_is_list": [false, false, false], "output_name": ["images", "audio", "fps"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "GetVideoComponents", "display_name": "Get Video Components", "description": "Extracts all components from a video: frames, audio, and framerate.", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["extract frames", "split video", "video to images", "demux"], "essentials_category": null, "has_intermediate_output": false}, "LoadVideo": {"input": {"required": {"file": ["COMBO", {"multiselect": false, "options": [], "video_upload": true}]}}, "input_order": {"required": ["file"]}, "is_input_list": false, "output": 
["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LoadVideo", "display_name": "Load Video", "description": "", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["import video", "open video", "video file"], "essentials_category": "Basics", "has_intermediate_output": false}, "Video Slice": {"input": {"required": {"video": ["VIDEO", {}], "start_time": ["FLOAT", {"tooltip": "Start time in seconds", "default": 0.0, "min": -100000.0, "max": 100000.0, "step": 0.001}], "duration": ["FLOAT", {"tooltip": "Duration in seconds, or 0 for unlimited duration", "default": 0.0, "min": 0.0, "step": 0.001}], "strict_duration": ["BOOLEAN", {"tooltip": "If True, when the specified duration is not possible, an error will be raised.", "default": false}]}}, "input_order": {"required": ["video", "start_time", "duration", "strict_duration"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Video Slice", "display_name": "Video Slice", "description": "", "python_module": "comfy_extras.nodes_video", "category": "image/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["trim video duration", "skip first frames", "frame load cap", "start time"], "essentials_category": "Video Tools", "has_intermediate_output": false}, "CLIPTextEncodeLumina2": {"input": {"required": {"system_prompt": ["COMBO", {"tooltip": "Lumina2 provide two types of system prompts:Superior: You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts. 
Alignment: You are an assistant designed to generate high-quality images with the highest degree of image-text alignment based on textual prompts.", "multiselect": false, "options": ["superior", "alignment"]}], "user_prompt": ["STRING", {"tooltip": "The text to be encoded.", "multiline": true, "dynamicPrompts": true}], "clip": ["CLIP", {"tooltip": "The CLIP model used for encoding the text."}]}}, "input_order": {"required": ["system_prompt", "user_prompt", "clip"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": ["A conditioning containing the embedded text used to guide the diffusion model."], "output_matchtypes": null, "name": "CLIPTextEncodeLumina2", "display_name": "CLIP Text Encode for Lumina2", "description": "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.", "python_module": "comfy_extras.nodes_lumina2", "category": "conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["lumina prompt"], "essentials_category": null, "has_intermediate_output": false}, "RenormCFG": {"input": {"required": {"model": ["MODEL", {}], "cfg_trunc": ["FLOAT", {"advanced": true, "default": 100, "min": 0.0, "max": 100.0, "step": 0.01}], "renorm_cfg": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "cfg_trunc", "renorm_cfg"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "RenormCFG", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lumina2", "category": "advanced/model", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, 
"price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanTrackToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "tracks": ["STRING", {"default": "[]", "multiline": true}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "temperature": ["FLOAT", {"advanced": true, "default": 220.0, "min": 1.0, "max": 1000.0, "step": 0.1}], "topk": ["INT", {"advanced": true, "default": 2, "min": 1, "max": 10}], "start_image": ["IMAGE", {}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "tracks", "width", "height", "length", "batch_size", "temperature", "topk", "start_image"], "optional": ["clip_vision_output"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanTrackToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["motion tracking", "trajectory video", "point tracking", "keypoint animation"], "essentials_category": null, "has_intermediate_output": false}, "WanImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], 
"length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["clip_vision_output", "start_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanFunControlToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}], "control_video": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["clip_vision_output", "start_image", "control_video"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanFunControlToVideo", "display_name": null, 
"description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Wan22FunControlToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"ref_image": ["IMAGE", {}], "control_video": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["ref_image", "control_video"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "Wan22FunControlToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanFunInpaintToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": 
{"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}], "end_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["clip_vision_output", "start_image", "end_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanFunInpaintToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanFirstLastFrameToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"clip_vision_start_image": ["CLIP_VISION_OUTPUT", {}], "clip_vision_end_image": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}], "end_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["clip_vision_start_image", "clip_vision_end_image", "start_image", "end_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanFirstLastFrameToVideo", "display_name": null, "description": "", 
"python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanVaceToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1000.0, "step": 0.01}]}, "optional": {"control_video": ["IMAGE", {}], "control_masks": ["MASK", {}], "reference_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size", "strength"], "optional": ["control_video", "control_masks", "reference_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["positive", "negative", "latent", "trim_latent"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "WanVaceToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["video conditioning", "video control"], "essentials_category": null, "has_intermediate_output": false}, "TrimVideoLatent": {"input": {"required": {"samples": ["LATENT", {}], "trim_amount": ["INT", {"default": 0, "min": 0, "max": 99999}]}}, "input_order": {"required": ["samples", "trim_amount"]}, "is_input_list": false, "output": ["LATENT"], 
"output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "TrimVideoLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "latent/video", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanCameraImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}], "camera_conditions": ["WAN_CAMERA_EMBEDDING", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["clip_vision_output", "start_image", "camera_conditions"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanCameraImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanPhantomSubjectToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 
16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"images": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["images"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false, false], "output_name": ["positive", "negative_text", "negative_img_text", "latent"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "WanPhantomSubjectToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanSoundImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 77, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"audio_encoder_output": ["AUDIO_ENCODER_OUTPUT", {}], "ref_image": ["IMAGE", {}], "control_video": ["IMAGE", {}], "ref_motion": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["audio_encoder_output", "ref_image", "control_video", "ref_motion"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], 
"output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanSoundImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanSoundImageToVideoExtend": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "length": ["INT", {"default": 77, "min": 1, "max": 16384, "step": 4}], "video_latent": ["LATENT", {}]}, "optional": {"audio_encoder_output": ["AUDIO_ENCODER_OUTPUT", {}], "ref_image": ["IMAGE", {}], "control_video": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "length", "video_latent"], "optional": ["audio_encoder_output", "ref_image", "control_video"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanSoundImageToVideoExtend", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanHuMoImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 97, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, 
"optional": {"audio_encoder_output": ["AUDIO_ENCODER_OUTPUT", {}], "ref_image": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["audio_encoder_output", "ref_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanHuMoImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanAnimateToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 77, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "continue_motion_max_frames": ["INT", {"default": 5, "min": 1, "max": 16384, "step": 4}], "video_frame_offset": ["INT", {"tooltip": "The amount of frames to seek in all the input videos. Used for generating longer videos by chunk. 
Connect to the video_frame_offset output of the previous node for extending a video.", "default": 0, "min": 0, "max": 16384, "step": 1}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "reference_image": ["IMAGE", {}], "face_video": ["IMAGE", {}], "pose_video": ["IMAGE", {}], "background_video": ["IMAGE", {}], "character_mask": ["MASK", {}], "continue_motion": ["IMAGE", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size", "continue_motion_max_frames", "video_frame_offset"], "optional": ["clip_vision_output", "reference_image", "face_video", "pose_video", "background_video", "character_mask", "continue_motion"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false], "output_name": ["positive", "negative", "latent", "trim_latent", "trim_image", "video_frame_offset"], "output_tooltips": [null, null, null, null, null, null], "output_matchtypes": null, "name": "WanAnimateToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Wan22ImageToVideoLatent": {"input": {"required": {"vae": ["VAE", {}], "width": ["INT", {"default": 1280, "min": 32, "max": 16384, "step": 32}], "height": ["INT", {"default": 704, "min": 32, "max": 16384, "step": 32}], "length": ["INT", {"default": 49, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"start_image": ["IMAGE", {}]}}, "input_order": {"required": ["vae", "width", "height", "length", "batch_size"], "optional": ["start_image"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], 
"output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "Wan22ImageToVideoLatent", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/inpaint", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanInfiniteTalkToVideo": {"input": {"required": {"mode": ["COMFY_DYNAMICCOMBO_V3", {"options": [{"key": "single_speaker", "inputs": {"required": {}}}, {"key": "two_speakers", "inputs": {"required": {}, "optional": {"audio_encoder_output_2": ["AUDIO_ENCODER_OUTPUT", {}], "mask_1": ["MASK", {"tooltip": "Mask for the first speaker, required if using two audio inputs."}], "mask_2": ["MASK", {"tooltip": "Mask for the second speaker, required if using two audio inputs."}]}}}]}], "model": ["MODEL", {}], "model_patch": ["MODEL_PATCH", {}], "positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "audio_encoder_output_1": ["AUDIO_ENCODER_OUTPUT", {}], "motion_frame_count": ["INT", {"tooltip": "Number of previous frames to use as motion context.", "advanced": true, "default": 9, "min": 1, "max": 33, "step": 1}], "audio_scale": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "start_image": ["IMAGE", {}], "previous_frames": ["IMAGE", {}]}}, "input_order": {"required": ["mode", "model", "model_patch", "positive", "negative", "vae", "width", "height", "length", "audio_encoder_output_1", "motion_frame_count", "audio_scale"], "optional": ["clip_vision_output", "start_image", "previous_frames"]}, 
"is_input_list": false, "output": ["MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["model", "positive", "negative", "latent", "trim_image"], "output_tooltips": [null, null, null, null, null], "output_matchtypes": null, "name": "WanInfiniteTalkToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanSCAILToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 512, "min": 32, "max": 16384, "step": 32}], "height": ["INT", {"default": 896, "min": 32, "max": 16384, "step": 32}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "pose_strength": ["FLOAT", {"tooltip": "Strength of the pose latent.", "default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "pose_start": ["FLOAT", {"tooltip": "Start step to use pose conditioning.", "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "pose_end": ["FLOAT", {"tooltip": "End step to use pose conditioning.", "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}], "reference_image": ["IMAGE", {}], "pose_video": ["IMAGE", {"tooltip": "Video used for pose conditioning. 
Will be downscaled to half the resolution of the main video."}]}}, "input_order": {"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size", "pose_strength", "pose_start", "pose_end"], "optional": ["clip_vision_output", "reference_image", "pose_video"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, "Empty latent of the generation size."], "output_matchtypes": null, "name": "WanSCAILToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wan", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LotusConditioning": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["conditioning"], "output_tooltips": [null], "output_matchtypes": null, "name": "LotusConditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_lotus", "category": "conditioning/lotus", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyLatentHunyuan3Dv2": {"input": {"required": {"resolution": ["INT", {"default": 3072, "min": 1, "max": 8192}], "batch_size": ["INT", {"tooltip": "The number of latent images in the batch.", "default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["resolution", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": 
"EmptyLatentHunyuan3Dv2", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "latent/3d", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Hunyuan3Dv2Conditioning": {"input": {"required": {"clip_vision_output": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": ["clip_vision_output"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Hunyuan3Dv2Conditioning", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Hunyuan3Dv2ConditioningMultiView": {"input": {"required": {}, "optional": {"front": ["CLIP_VISION_OUTPUT", {}], "left": ["CLIP_VISION_OUTPUT", {}], "back": ["CLIP_VISION_OUTPUT", {}], "right": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": [], "optional": ["front", "left", "back", "right"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Hunyuan3Dv2ConditioningMultiView", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"VAEDecodeHunyuan3D": {"input": {"required": {"samples": ["LATENT", {}], "vae": ["VAE", {}], "num_chunks": ["INT", {"advanced": true, "default": 8000, "min": 1000, "max": 500000}], "octree_resolution": ["INT", {"advanced": true, "default": 256, "min": 16, "max": 512}]}}, "input_order": {"required": ["samples", "vae", "num_chunks", "octree_resolution"]}, "is_input_list": false, "output": ["VOXEL"], "output_is_list": [false], "output_name": ["VOXEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "VAEDecodeHunyuan3D", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "latent/3d", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "VoxelToMeshBasic": {"input": {"required": {"voxel": ["VOXEL", {}], "threshold": ["FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["voxel", "threshold"]}, "is_input_list": false, "output": ["MESH"], "output_is_list": [false], "output_name": ["MESH"], "output_tooltips": [null], "output_matchtypes": null, "name": "VoxelToMeshBasic", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "3d", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "VoxelToMesh": {"input": {"required": {"voxel": ["VOXEL", {}], "algorithm": ["COMBO", {"advanced": true, "multiselect": false, "options": ["surface net", "basic"]}], "threshold": ["FLOAT", {"default": 0.6, "min": -1.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["voxel", "algorithm", "threshold"]}, "is_input_list": false, "output": ["MESH"], "output_is_list": [false], "output_name": ["MESH"], "output_tooltips": 
[null], "output_matchtypes": null, "name": "VoxelToMesh", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "3d", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SaveGLB": {"input": {"required": {"mesh": ["MESH,FILE_3D_GLB,FILE_3D_GLTF,FILE_3D_OBJ,FILE_3D_FBX,FILE_3D_STL,FILE_3D_USDZ,FILE_3D", {"tooltip": "Mesh or 3D file to save"}], "filename_prefix": ["STRING", {"default": "mesh/ComfyUI", "multiline": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["mesh", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "SaveGLB", "display_name": "Save 3D Model", "description": "", "python_module": "comfy_extras.nodes_hunyuan3d", "category": "3d", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["export 3d model", "save mesh"], "essentials_category": "Basics", "has_intermediate_output": false}, "PrimitiveString": {"input": {"required": {"value": ["STRING", {"multiline": false}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveString", "display_name": "String", "description": "", "python_module": "comfy_extras.nodes_primitive", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PrimitiveStringMultiline": {"input": 
{"required": {"value": ["STRING", {"multiline": true}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveStringMultiline", "display_name": "String (Multiline)", "description": "", "python_module": "comfy_extras.nodes_primitive", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": "Basics", "has_intermediate_output": false}, "PrimitiveInt": {"input": {"required": {"value": ["INT", {"min": -9223372036854775807, "max": 9223372036854775807, "control_after_generate": true}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveInt", "display_name": "Int", "description": "", "python_module": "comfy_extras.nodes_primitive", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PrimitiveFloat": {"input": {"required": {"value": ["FLOAT", {"min": -9223372036854775807, "max": 9223372036854775807, "step": 0.1}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveFloat", "display_name": "Float", "description": "", "python_module": "comfy_extras.nodes_primitive", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "PrimitiveBoolean": {"input": {"required": {"value": ["BOOLEAN", {}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "output_tooltips": [null], "output_matchtypes": null, "name": "PrimitiveBoolean", "display_name": "Boolean", "description": "", "python_module": "comfy_extras.nodes_primitive", "category": "utils/primitive", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CFGZeroStar": {"input": {"required": {"model": ["MODEL", {}]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["patched_model"], "output_tooltips": [null], "output_matchtypes": null, "name": "CFGZeroStar", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cfg", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CFGNorm": {"input": {"required": {"model": ["MODEL", {}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "strength"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["patched_model"], "output_tooltips": [null], "output_matchtypes": null, "name": "CFGNorm", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_cfg", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"OptimalStepsScheduler": {"input": {"required": {"model_type": ["COMBO", {"multiselect": false, "options": ["FLUX", "Wan", "Chroma"]}], "steps": ["INT", {"default": 20, "min": 3, "max": 1000}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model_type", "steps", "denoise"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "output_tooltips": [null], "output_matchtypes": null, "name": "OptimalStepsScheduler", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_optimalsteps", "category": "sampling/custom_sampling/schedulers", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "QuadrupleCLIPLoader": {"input": {"required": {"clip_name1": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", 
"mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "clip_name2": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", 
"google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "clip_name3": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", 
"ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", 
"t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}], "clip_name4": ["COMBO", {"multiselect": false, "options": ["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", 
"qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]}]}}, "input_order": {"required": ["clip_name1", "clip_name2", "clip_name3", "clip_name4"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "output_tooltips": [null], "output_matchtypes": null, "name": "QuadrupleCLIPLoader", "display_name": null, "description": "[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct", "python_module": "comfy_extras.nodes_hidream", "category": "advanced/loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeHiDream": {"input": {"required": {"clip": ["CLIP", {}], "clip_l": ["STRING", {"multiline": true, "dynamicPrompts": true}], "clip_g": ["STRING", {"multiline": true, "dynamicPrompts": true}], "t5xxl": ["STRING", {"multiline": true, "dynamicPrompts": true}], "llama": ["STRING", {"multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["clip", "clip_l", "clip_g", "t5xxl", "llama"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": 
null, "name": "CLIPTextEncodeHiDream", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_hidream", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["hidream prompt"], "essentials_category": null, "has_intermediate_output": false}, "FreSca": {"input": {"required": {"model": ["MODEL", {}], "scale_low": ["FLOAT", {"tooltip": "Scaling factor for low-frequency components", "advanced": true, "default": 1.0, "min": 0, "max": 10, "step": 0.01}], "scale_high": ["FLOAT", {"tooltip": "Scaling factor for high-frequency components", "advanced": true, "default": 1.25, "min": 0, "max": 10, "step": 0.01}], "freq_cutoff": ["INT", {"tooltip": "Number of frequency indices around center to consider as low-frequency", "advanced": true, "default": 20, "min": 1, "max": 10000, "step": 1}]}}, "input_order": {"required": ["model", "scale_low", "scale_high", "freq_cutoff"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "FreSca", "display_name": "FreSca", "description": "Applies frequency-dependent scaling to the guidance", "python_module": "comfy_extras.nodes_fresca", "category": "_for_testing", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["frequency guidance"], "essentials_category": null, "has_intermediate_output": false}, "APG": {"input": {"required": {"model": ["MODEL", {}], "eta": ["FLOAT", {"tooltip": "Controls the scale of the parallel guidance vector. 
Default CFG behavior at a setting of 1.", "advanced": true, "default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "norm_threshold": ["FLOAT", {"tooltip": "Normalize guidance vector to this value, normalization is disabled at a setting of 0.", "advanced": true, "default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1}], "momentum": ["FLOAT", {"tooltip": "Controls a running average of guidance during diffusion, disabled at a setting of 0.", "advanced": true, "default": 0.0, "min": -5.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "eta", "norm_threshold", "momentum"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "APG", "display_name": "Adaptive Projected Guidance", "description": "", "python_module": "comfy_extras.nodes_apg", "category": "sampling/custom_sampling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PreviewAny": {"input": {"required": {"source": ["*", {}]}}, "input_order": {"required": ["source"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "PreviewAny", "display_name": "Preview as Text", "description": "", "python_module": "comfy_extras.nodes_preview_any", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": ["show output", "inspect", "debug", "print value", "show text"]}, "TextEncodeAceStepAudio": {"input": {"required": {"clip": ["CLIP", {}], "tags": ["STRING", {"multiline": true, "dynamicPrompts": true}], "lyrics": ["STRING", {"multiline": true, "dynamicPrompts": true}], "lyrics_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["clip", "tags", "lyrics", "lyrics_strength"]}, "is_input_list": false, "output": 
["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeAceStepAudio", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_ace", "category": "conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyAceStepLatentAudio": {"input": {"required": {"seconds": ["FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.1}], "batch_size": ["INT", {"tooltip": "The number of latent images in the batch.", "default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["seconds", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyAceStepLatentAudio", "display_name": "Empty Ace Step 1.0 Latent Audio", "description": "", "python_module": "comfy_extras.nodes_ace", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextEncodeAceStepAudio1.5": {"input": {"required": {"clip": ["CLIP", {}], "tags": ["STRING", {"multiline": true, "dynamicPrompts": true}], "lyrics": ["STRING", {"multiline": true, "dynamicPrompts": true}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "bpm": ["INT", {"default": 120, "min": 10, "max": 300}], "duration": ["FLOAT", {"default": 120.0, "min": 0.0, "max": 2000.0, "step": 0.1}], "timesignature": ["COMBO", {"multiselect": false, "options": ["2", "3", "4", "6"]}], "language": ["COMBO", {"multiselect": false, "options": ["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", 
"pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]}], "keyscale": ["COMBO", {"multiselect": false, "options": ["C major", "C# major", "Db major", "D major", "D# major", "Eb major", "E major", "F major", "F# major", "Gb major", "G major", "G# major", "Ab major", "A major", "A# major", "Bb major", "B major", "C minor", "C# minor", "Db minor", "D minor", "D# minor", "Eb minor", "E minor", "F minor", "F# minor", "Gb minor", "G minor", "G# minor", "Ab minor", "A minor", "A# minor", "Bb minor", "B minor"]}], "generate_audio_codes": ["BOOLEAN", {"tooltip": "Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", "advanced": true, "default": true}], "cfg_scale": ["FLOAT", {"advanced": true, "default": 2.0, "min": 0.0, "max": 100.0, "step": 0.1}], "temperature": ["FLOAT", {"advanced": true, "default": 0.85, "min": 0.0, "max": 2.0, "step": 0.01}], "top_p": ["FLOAT", {"advanced": true, "default": 0.9, "min": 0.0, "max": 2000.0, "step": 0.01}], "top_k": ["INT", {"advanced": true, "default": 0, "min": 0, "max": 100}], "min_p": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["clip", "tags", "lyrics", "seed", "bpm", "duration", "timesignature", "language", "keyscale", "generate_audio_codes", "cfg_scale", "temperature", "top_p", "top_k", "min_p"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeAceStepAudio1.5", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_ace", "category": "conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"EmptyAceStep1.5LatentAudio": {"input": {"required": {"seconds": ["FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.01}], "batch_size": ["INT", {"tooltip": "The number of latent images in the batch.", "default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["seconds", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyAceStep1.5LatentAudio", "display_name": "Empty Ace Step 1.5 Latent Audio", "description": "", "python_module": "comfy_extras.nodes_ace", "category": "latent/audio", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReferenceTimbreAudio": {"input": {"required": {"conditioning": ["CONDITIONING", {}]}, "optional": {"latent": ["LATENT", {}]}}, "input_order": {"required": ["conditioning"], "optional": ["latent"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReferenceTimbreAudio", "display_name": "Reference Audio", "description": "This node sets the reference audio for ace step 1.5", "python_module": "comfy_extras.nodes_ace", "category": "advanced/conditioning/audio", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StringConcatenate": {"input": {"required": {"string_a": ["STRING", {"multiline": true}], "string_b": ["STRING", {"multiline": true}], "delimiter": ["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": ["string_a", "string_b", "delimiter"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], 
"output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringConcatenate", "display_name": "Text Concatenate", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Concatenate", "text concat", "join text", "merge text", "combine strings", "concat", "concatenate", "append text", "combine text", "string"], "essentials_category": null, "has_intermediate_output": false}, "StringSubstring": {"input": {"required": {"string": ["STRING", {"multiline": true}], "start": ["INT", {}], "end": ["INT", {}]}}, "input_order": {"required": ["string", "start", "end"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringSubstring", "display_name": "Text Substring", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Substring", "extract text", "text portion"], "essentials_category": null, "has_intermediate_output": false}, "StringLength": {"input": {"required": {"string": ["STRING", {"multiline": true}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["length"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringLength", "display_name": "Text Length", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["character count", "text size", "string length"], "essentials_category": null, 
"has_intermediate_output": false}, "CaseConverter": {"input": {"required": {"string": ["STRING", {"multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["UPPERCASE", "lowercase", "Capitalize", "Title Case"]}]}}, "input_order": {"required": ["string", "mode"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CaseConverter", "display_name": "Text Case Converter", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Case Converter", "text case", "uppercase", "lowercase", "capitalize"], "essentials_category": null, "has_intermediate_output": false}, "StringTrim": {"input": {"required": {"string": ["STRING", {"multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["Both", "Left", "Right"]}]}}, "input_order": {"required": ["string", "mode"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringTrim", "display_name": "Text Trim", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Trim", "clean whitespace", "remove whitespace", "strip"], "essentials_category": null, "has_intermediate_output": false}, "StringReplace": {"input": {"required": {"string": ["STRING", {"multiline": true}], "find": ["STRING", {"multiline": true}], "replace": ["STRING", {"multiline": true}]}}, "input_order": {"required": ["string", "find", "replace"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": 
[null], "output_matchtypes": null, "name": "StringReplace", "display_name": "Text Replace", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Replace", "find and replace", "substitute", "swap text"], "essentials_category": null, "has_intermediate_output": false}, "StringContains": {"input": {"required": {"string": ["STRING", {"multiline": true}], "substring": ["STRING", {"multiline": true}], "case_sensitive": ["BOOLEAN", {"advanced": true, "default": true}]}}, "input_order": {"required": ["string", "substring", "case_sensitive"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["contains"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringContains", "display_name": "Text Contains", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Contains", "text includes", "string includes"], "essentials_category": null, "has_intermediate_output": false}, "StringCompare": {"input": {"required": {"string_a": ["STRING", {"multiline": true}], "string_b": ["STRING", {"multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["Starts With", "Ends With", "Equal"]}], "case_sensitive": ["BOOLEAN", {"advanced": true, "default": true}]}}, "input_order": {"required": ["string_a", "string_b", "mode", "case_sensitive"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "output_tooltips": [null], "output_matchtypes": null, "name": "StringCompare", "display_name": "Text Compare", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Compare", "text match", "string equals", "starts with", "ends with"], "essentials_category": null, "has_intermediate_output": false}, "RegexMatch": {"input": {"required": {"string": ["STRING", {"multiline": true}], "regex_pattern": ["STRING", {"multiline": true}], "case_insensitive": ["BOOLEAN", {"advanced": true, "default": true}], "multiline": ["BOOLEAN", {"advanced": true, "default": false}], "dotall": ["BOOLEAN", {"advanced": true, "default": false}]}}, "input_order": {"required": ["string", "regex_pattern", "case_insensitive", "multiline", "dotall"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["matches"], "output_tooltips": [null], "output_matchtypes": null, "name": "RegexMatch", "display_name": "Text Match", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Regex Match", "regex", "pattern match", "text contains", "string match"], "essentials_category": null, "has_intermediate_output": false}, "RegexExtract": {"input": {"required": {"string": ["STRING", {"multiline": true}], "regex_pattern": ["STRING", {"multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["First Match", "All Matches", "First Group", "All Groups"]}], "case_insensitive": ["BOOLEAN", {"advanced": true, "default": true}], "multiline": ["BOOLEAN", {"advanced": true, "default": false}], "dotall": ["BOOLEAN", {"advanced": true, "default": false}], "group_index": ["INT", {"advanced": true, "default": 1, "min": 0, "max": 100}]}}, "input_order": {"required": ["string", "regex_pattern", "mode", "case_insensitive", "multiline", "dotall", "group_index"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], 
"output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "RegexExtract", "display_name": "Text Extract Substring", "description": "", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Regex Extract", "regex", "pattern extract", "text parser", "parse text"], "essentials_category": null, "has_intermediate_output": false}, "RegexReplace": {"input": {"required": {"string": ["STRING", {"multiline": true}], "regex_pattern": ["STRING", {"multiline": true}], "replace": ["STRING", {"multiline": true}]}, "optional": {"case_insensitive": ["BOOLEAN", {"advanced": true, "default": true}], "multiline": ["BOOLEAN", {"advanced": true, "default": false}], "dotall": ["BOOLEAN", {"tooltip": "When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines.", "advanced": true, "default": false}], "count": ["INT", {"tooltip": "Maximum number of replacements to make. Set to 0 to replace all occurrences (default). 
Set to 1 to replace only the first match, 2 for the first two matches, etc.", "advanced": true, "default": 0, "min": 0, "max": 100}]}}, "input_order": {"required": ["string", "regex_pattern", "replace"], "optional": ["case_insensitive", "multiline", "dotall", "count"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "RegexReplace", "display_name": "Text Replace (Regex)", "description": "Find and replace text using regex patterns.", "python_module": "comfy_extras.nodes_string", "category": "utils/string", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Regex Replace", "regex", "pattern replace", "regex replace", "substitution"], "essentials_category": null, "has_intermediate_output": false}, "WanCameraEmbedding": {"input": {"required": {"camera_pose": ["COMBO", {"default": "Static", "multiselect": false, "options": ["Static", "Pan Up", "Pan Down", "Pan Left", "Pan Right", "Zoom In", "Zoom Out", "Anti Clockwise (ACW)", "ClockWise (CW)"]}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}]}, "optional": {"speed": ["FLOAT", {"default": 1.0, "min": 0, "max": 10.0, "step": 0.1}], "fx": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0, "max": 1, "step": 1e-09}], "fy": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0, "max": 1, "step": 1e-09}], "cx": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0, "max": 1, "step": 0.01}], "cy": ["FLOAT", {"advanced": true, "default": 0.5, "min": 0, "max": 1, "step": 0.01}]}}, "input_order": {"required": ["camera_pose", "width", "height", "length"], "optional": ["speed", "fx", "fy", "cx", "cy"]}, "is_input_list": false, "output": 
["WAN_CAMERA_EMBEDDING", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["camera_embedding", "width", "height", "length"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "WanCameraEmbedding", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_camera_trajectory", "category": "camera", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReferenceLatent": {"input": {"required": {"conditioning": ["CONDITIONING", {}]}, "optional": {"latent": ["LATENT", {}]}}, "input_order": {"required": ["conditioning"], "optional": ["latent"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReferenceLatent", "display_name": null, "description": "This node sets the guiding latent for an edit model. 
If the model supports it you can chain multiple to set multiple reference images.", "python_module": "comfy_extras.nodes_edit_model", "category": "advanced/conditioning/edit_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TCFG": {"input": {"required": {"model": ["MODEL", {}]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["patched_model"], "output_tooltips": [null], "output_matchtypes": null, "name": "TCFG", "display_name": "Tangential Damping CFG", "description": "TCFG \u2013 Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.", "python_module": "comfy_extras.nodes_tcfg", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ContextWindowsManual": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to apply context windows to during sampling."}], "context_length": ["INT", {"tooltip": "The length of the context window.", "advanced": true, "default": 16, "min": 1}], "context_overlap": ["INT", {"tooltip": "The overlap of the context window.", "advanced": true, "default": 4, "min": 0}], "context_schedule": ["COMBO", {"tooltip": "The stride of the context window.", "multiselect": false, "options": ["standard_static", "standard_uniform", "looped_uniform", "batched"]}], "context_stride": ["INT", {"tooltip": "The stride of the context window; only applicable to uniform schedules.", "advanced": true, "default": 1, "min": 1}], "closed_loop": ["BOOLEAN", {"tooltip": "Whether to close the context window loop; only applicable to looped schedules.", 
"default": false}], "fuse_method": ["COMBO", {"tooltip": "The method to use to fuse the context windows.", "default": "pyramid", "multiselect": false, "options": ["pyramid", "relative", "flat", "overlap-linear"]}], "dim": ["INT", {"tooltip": "The dimension to apply the context windows to.", "default": 0, "min": 0, "max": 5}], "freenoise": ["BOOLEAN", {"tooltip": "Whether to apply FreeNoise noise shuffling, improves window blending.", "default": false}], "cond_retain_index_list": ["STRING", {"tooltip": "List of latent indices to retain in the conditioning tensors for each window, for example setting this to '0' will use the initial start image for each window.", "default": "", "multiline": false}], "split_conds_to_windows": ["BOOLEAN", {"tooltip": "Whether to split multiple conditionings (created by ConditionCombine) to each window based on region index.", "default": false}]}}, "input_order": {"required": ["model", "context_length", "context_overlap", "context_schedule", "context_stride", "closed_loop", "fuse_method", "dim", "freenoise", "cond_retain_index_list", "split_conds_to_windows"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The model with context windows applied during sampling."], "output_matchtypes": null, "name": "ContextWindowsManual", "display_name": "Context Windows (Manual)", "description": "Manually set context windows.", "python_module": "comfy_extras.nodes_context_windows", "category": "context", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanContextWindowsManual": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to apply context windows to during sampling."}], "context_length": ["INT", {"tooltip": "The length of the context window.", "advanced": true, "default": 81, "min": 1, "max": 16384, 
"step": 4}], "context_overlap": ["INT", {"tooltip": "The overlap of the context window.", "advanced": true, "default": 30, "min": 0}], "context_schedule": ["COMBO", {"tooltip": "The stride of the context window.", "multiselect": false, "options": ["standard_static", "standard_uniform", "looped_uniform", "batched"]}], "context_stride": ["INT", {"tooltip": "The stride of the context window; only applicable to uniform schedules.", "advanced": true, "default": 1, "min": 1}], "closed_loop": ["BOOLEAN", {"tooltip": "Whether to close the context window loop; only applicable to looped schedules.", "default": false}], "fuse_method": ["COMBO", {"tooltip": "The method to use to fuse the context windows.", "default": "pyramid", "multiselect": false, "options": ["pyramid", "relative", "flat", "overlap-linear"]}], "freenoise": ["BOOLEAN", {"tooltip": "Whether to apply FreeNoise noise shuffling, improves window blending.", "default": false}]}}, "input_order": {"required": ["model", "context_length", "context_overlap", "context_schedule", "context_stride", "closed_loop", "fuse_method", "freenoise"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The model with context windows applied during sampling."], "output_matchtypes": null, "name": "WanContextWindowsManual", "display_name": "WAN Context Windows (Manual)", "description": "Manually set context windows for WAN-like models (dim=2).", "python_module": "comfy_extras.nodes_context_windows", "category": "context", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextEncodeQwenImageEdit": {"input": {"required": {"clip": ["CLIP", {}], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE", {}], "image": ["IMAGE", {}]}}, "input_order": {"required": ["clip", 
"prompt"], "optional": ["vae", "image"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeQwenImageEdit", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_qwen", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TextEncodeQwenImageEditPlus": {"input": {"required": {"clip": ["CLIP", {}], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE", {}], "image1": ["IMAGE", {}], "image2": ["IMAGE", {}], "image3": ["IMAGE", {}]}}, "input_order": {"required": ["clip", "prompt"], "optional": ["vae", "image1", "image2", "image3"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeQwenImageEditPlus", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_qwen", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyQwenImageLayeredLatentImage": {"input": {"required": {"width": ["INT", {"default": 640, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 640, "min": 16, "max": 16384, "step": 16}], "layers": ["INT", {"advanced": true, "default": 3, "min": 0, "max": 16384, "step": 1}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "layers", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": 
["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyQwenImageLayeredLatentImage", "display_name": "Empty Qwen Image Layered Latent", "description": "", "python_module": "comfy_extras.nodes_qwen", "category": "latent/qwen", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "EmptyChromaRadianceLatentImage": {"input": {"required": {"width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["width", "height", "batch_size"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "output_tooltips": [null], "output_matchtypes": null, "name": "EmptyChromaRadianceLatentImage", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_chroma_radiance", "category": "latent/chroma_radiance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ChromaRadianceOptions": {"input": {"required": {"model": ["MODEL", {}], "preserve_wrapper": ["BOOLEAN", {"tooltip": "When enabled, will delegate to an existing model function wrapper if it exists. Generally should be left enabled.", "default": true}], "start_sigma": ["FLOAT", {"tooltip": "First sigma that these options will be in effect.", "advanced": true, "default": 1.0, "min": 0.0, "max": 1.0}], "end_sigma": ["FLOAT", {"tooltip": "Last sigma that these options will be in effect.", "advanced": true, "default": 0.0, "min": 0.0, "max": 1.0}], "nerf_tile_size": ["INT", {"tooltip": "Allows overriding the default NeRF tile size. 
-1 means use the default (32). 0 means use non-tiling mode (may require a lot of VRAM).", "advanced": true, "default": -1, "min": -1}]}}, "input_order": {"required": ["model", "preserve_wrapper", "start_sigma", "end_sigma", "nerf_tile_size"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "ChromaRadianceOptions", "display_name": null, "description": "Allows setting advanced options for the Chroma Radiance model.", "python_module": "comfy_extras.nodes_chroma_radiance", "category": "model_patches/chroma_radiance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ModelPatchLoader": {"input": {"required": {"name": [["qwen_image_canny_diffsynth_controlnet.safetensors", "qwen_image_depth_diffsynth_controlnet.safetensors", "qwen_image_inpaint_diffsynth_controlnet.safetensors", "uso-flux1-projector-v1.safetensors", "wan2.1_infiniteTalk_multi_fp16.safetensors", "wan2.1_infiniteTalk_single_fp16.safetensors"]]}}, "input_order": {"required": ["name"]}, "is_input_list": false, "output": ["MODEL_PATCH"], "output_is_list": [false], "output_name": ["MODEL_PATCH"], "name": "ModelPatchLoader", "display_name": "ModelPatchLoader", "description": "", "python_module": "comfy_extras.nodes_model_patch", "category": "advanced/loaders", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "QwenImageDiffsynthControlnet": {"input": {"required": {"model": ["MODEL"], "model_patch": ["MODEL_PATCH"], "vae": ["VAE"], "image": ["IMAGE"], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["model", "model_patch", "vae", "image", "strength"], "optional": ["mask"]}, "is_input_list": false, 
"output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "QwenImageDiffsynthControlnet", "display_name": "QwenImageDiffsynthControlnet", "description": "", "python_module": "comfy_extras.nodes_model_patch", "category": "advanced/loaders/qwen", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ZImageFunControlnet": {"input": {"required": {"model": ["MODEL"], "model_patch": ["MODEL_PATCH"], "vae": ["VAE"], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}, "optional": {"image": ["IMAGE"], "inpaint_image": ["IMAGE"], "mask": ["MASK"]}}, "input_order": {"required": ["model", "model_patch", "vae", "strength"], "optional": ["image", "inpaint_image", "mask"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ZImageFunControlnet", "display_name": "ZImageFunControlnet", "description": "", "python_module": "comfy_extras.nodes_model_patch", "category": "advanced/loaders/zimage", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "USOStyleReference": {"input": {"required": {"model": ["MODEL"], "model_patch": ["MODEL_PATCH"], "clip_vision_output": ["CLIP_VISION_OUTPUT"]}}, "input_order": {"required": ["model", "model_patch", "clip_vision_output"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "USOStyleReference", "display_name": "USOStyleReference", "description": "", "python_module": "comfy_extras.nodes_model_patch", "category": "advanced/model_patches/flux", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "EasyCache": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to add EasyCache to."}], "reuse_threshold": ["FLOAT", {"tooltip": "The threshold for reusing cached steps.", "advanced": true, "default": 0.2, "min": 0.0, "max": 3.0, 
"step": 0.01}], "start_percent": ["FLOAT", {"tooltip": "The relative sampling step to begin use of EasyCache.", "advanced": true, "default": 0.15, "min": 0.0, "max": 1.0, "step": 0.01}], "end_percent": ["FLOAT", {"tooltip": "The relative sampling step to end use of EasyCache.", "advanced": true, "default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01}], "verbose": ["BOOLEAN", {"tooltip": "Whether to log verbose information.", "advanced": true, "default": false}]}}, "input_order": {"required": ["model", "reuse_threshold", "start_percent", "end_percent", "verbose"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The model with EasyCache."], "output_matchtypes": null, "name": "EasyCache", "display_name": "EasyCache", "description": "Native EasyCache implementation.", "python_module": "comfy_extras.nodes_easycache", "category": "advanced/debug/model", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LazyCache": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to add LazyCache to."}], "reuse_threshold": ["FLOAT", {"tooltip": "The threshold for reusing cached steps.", "advanced": true, "default": 0.2, "min": 0.0, "max": 3.0, "step": 0.01}], "start_percent": ["FLOAT", {"tooltip": "The relative sampling step to begin use of LazyCache.", "advanced": true, "default": 0.15, "min": 0.0, "max": 1.0, "step": 0.01}], "end_percent": ["FLOAT", {"tooltip": "The relative sampling step to end use of LazyCache.", "advanced": true, "default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01}], "verbose": ["BOOLEAN", {"tooltip": "Whether to log verbose information.", "advanced": true, "default": false}]}}, "input_order": {"required": ["model", "reuse_threshold", "start_percent", "end_percent", "verbose"]}, "is_input_list": false, "output": 
["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The model with LazyCache."], "output_matchtypes": null, "name": "LazyCache", "display_name": "LazyCache", "description": "A homebrew version of EasyCache - even 'easier' version of EasyCache to implement. Overall works worse than EasyCache, but better in some rare cases AND universal compatibility with everything in ComfyUI.", "python_module": "comfy_extras.nodes_easycache", "category": "advanced/debug/model", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AudioEncoderLoader": {"input": {"required": {"audio_encoder_name": ["COMBO", {"multiselect": false, "options": ["wav2vec2_large_english_fp16.safetensors", "whisper_large_v3_fp16.safetensors"]}]}}, "input_order": {"required": ["audio_encoder_name"]}, "is_input_list": false, "output": ["AUDIO_ENCODER"], "output_is_list": [false], "output_name": ["AUDIO_ENCODER"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioEncoderLoader", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_audio_encoder", "category": "loaders", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AudioEncoderEncode": {"input": {"required": {"audio_encoder": ["AUDIO_ENCODER", {}], "audio": ["AUDIO", {}]}}, "input_order": {"required": ["audio_encoder", "audio"]}, "is_input_list": false, "output": ["AUDIO_ENCODER_OUTPUT"], "output_is_list": [false], "output_name": ["AUDIO_ENCODER_OUTPUT"], "output_tooltips": [null], "output_matchtypes": null, "name": "AudioEncoderEncode", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_audio_encoder", "category": "conditioning", 
"output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ScaleROPE": {"input": {"required": {"model": ["MODEL", {}], "scale_x": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}], "shift_x": ["FLOAT", {"advanced": true, "default": 0.0, "min": -256.0, "max": 256.0, "step": 0.1}], "scale_y": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}], "shift_y": ["FLOAT", {"advanced": true, "default": 0.0, "min": -256.0, "max": 256.0, "step": 0.1}], "scale_t": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}], "shift_t": ["FLOAT", {"advanced": true, "default": 0.0, "min": -256.0, "max": 256.0, "step": 0.1}]}}, "input_order": {"required": ["model", "scale_x", "shift_x", "scale_y", "shift_y", "scale_t", "shift_t"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "ScaleROPE", "display_name": null, "description": "Scale and shift the ROPE of the model.", "python_module": "comfy_extras.nodes_rope", "category": "advanced/model_patches", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ComfySwitchNode": {"input": {"required": {"switch": ["BOOLEAN", {}], "on_false": ["COMFY_MATCHTYPE_V3", {"lazy": true, "template": {"template_id": "switch", "allowed_types": "*"}}], "on_true": ["COMFY_MATCHTYPE_V3", {"lazy": true, "template": {"template_id": "switch", "allowed_types": "*"}}]}}, "input_order": {"required": ["switch", "on_false", "on_true"]}, "is_input_list": false, "output": ["COMFY_MATCHTYPE_V3"], "output_is_list": [false], "output_name": ["output"], 
"output_tooltips": [null], "output_matchtypes": ["switch"], "name": "ComfySwitchNode", "display_name": "Switch", "description": "", "python_module": "comfy_extras.nodes_logic", "category": "logic", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CustomCombo": {"input": {"required": {"choice": ["COMBO", {"multiselect": false, "options": []}]}}, "input_order": {"required": ["choice"]}, "is_input_list": false, "output": ["STRING", "INT"], "output_is_list": [false, false], "output_name": ["STRING", "INDEX"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "CustomCombo", "display_name": "Custom Combo", "description": "", "python_module": "comfy_extras.nodes_logic", "category": "utils", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ResolutionSelector": {"input": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio for the output dimensions.", "default": "1:1 (Square)", "multiselect": false, "options": ["1:1 (Square)", "3:2 (Photo)", "4:3 (Standard)", "16:9 (Widescreen)", "21:9 (Ultrawide)", "2:3 (Portrait Photo)", "3:4 (Portrait Standard)", "9:16 (Portrait Widescreen)"]}], "megapixels": ["FLOAT", {"tooltip": "Target total megapixels. 
1.0 MP \u2248 1024\u00d71024 for square.", "default": 1.0, "min": 0.1, "max": 16.0, "step": 0.1}]}}, "input_order": {"required": ["aspect_ratio", "megapixels"]}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["width", "height"], "output_tooltips": ["Calculated width in pixels (multiple of 8).", "Calculated height in pixels (multiple of 8)."], "output_matchtypes": null, "name": "ResolutionSelector", "display_name": "Resolution Selector", "description": "Calculate width and height from aspect ratio and megapixel target. Useful for setting up Empty Latent Image dimensions.", "python_module": "comfy_extras.nodes_resolution", "category": "utils", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "wanBlockSwap": {"input": {"required": {"model": ["MODEL", {}]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": [null], "output_matchtypes": null, "name": "wanBlockSwap", "display_name": null, "description": "NOP", "python_module": "comfy_extras.nodes_nop", "category": "", "output_node": false, "deprecated": true, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Kandinsky5ImageToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "width": ["INT", {"default": 768, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 512, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 121, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"start_image": ["IMAGE", {}]}}, "input_order": 
{"required": ["positive", "negative", "vae", "width", "height", "length", "batch_size"], "optional": ["start_image"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT", "LATENT"], "output_is_list": [false, false, false, false], "output_name": ["positive", "negative", "latent", "cond_latent"], "output_tooltips": [null, null, "Empty video latent", "Clean encoded start images, used to replace the noisy start of the model output latents"], "output_matchtypes": null, "name": "Kandinsky5ImageToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_kandinsky5", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "NormalizeVideoLatentStart": {"input": {"required": {"latent": ["LATENT", {}], "start_frame_count": ["INT", {"tooltip": "Number of latent frames to normalize, counted from the start", "default": 4, "min": 1, "max": 16384, "step": 1}], "reference_frame_count": ["INT", {"tooltip": "Number of latent frames after the start frames to use as reference", "default": 5, "min": 1, "max": 16384, "step": 1}]}}, "input_order": {"required": ["latent", "start_frame_count", "reference_frame_count"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["latent"], "output_tooltips": [null], "output_matchtypes": null, "name": "NormalizeVideoLatentStart", "display_name": null, "description": "Normalizes the initial frames of a video latent to match the mean and standard deviation of subsequent reference frames. 
Helps reduce differences between the starting frames and the rest of the video.", "python_module": "comfy_extras.nodes_kandinsky5", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CLIPTextEncodeKandinsky5": {"input": {"required": {"clip": ["CLIP", {}], "clip_l": ["STRING", {"multiline": true, "dynamicPrompts": true}], "qwen25_7b": ["STRING", {"multiline": true, "dynamicPrompts": true}]}}, "input_order": {"required": ["clip", "clip_l", "qwen25_7b"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "CLIPTextEncodeKandinsky5", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_kandinsky5", "category": "advanced/conditioning/kandinsky5", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["kandinsky prompt"], "essentials_category": null, "has_intermediate_output": false}, "WanMoveTrackToVideo": {"input": {"required": {"positive": ["CONDITIONING", {}], "negative": ["CONDITIONING", {}], "vae": ["VAE", {}], "strength": ["FLOAT", {"tooltip": "Strength of the track conditioning.", "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}], "width": ["INT", {"default": 832, "min": 16, "max": 16384, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 16384, "step": 16}], "length": ["INT", {"default": 81, "min": 1, "max": 16384, "step": 4}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "start_image": ["IMAGE", {}]}, "optional": {"tracks": ["TRACKS", {}], "clip_vision_output": ["CLIP_VISION_OUTPUT", {}]}}, "input_order": {"required": ["positive", "negative", "vae", "strength", "width", "height", "length", 
"batch_size", "start_image"], "optional": ["tracks", "clip_vision_output"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "WanMoveTrackToVideo", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wanmove", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanMoveTracksFromCoords": {"input": {"required": {}, "optional": {"track_coords": ["STRING", {"default": "[]", "forceInput": true, "multiline": false}], "track_mask": ["MASK", {}]}}, "input_order": {"required": [], "optional": ["track_coords", "track_mask"]}, "is_input_list": false, "output": ["TRACKS", "INT"], "output_is_list": [false, false], "output_name": ["TRACKS", "track_length"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "WanMoveTracksFromCoords", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wanmove", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanMoveConcatTrack": {"input": {"required": {"tracks_1": ["TRACKS", {}]}, "optional": {"tracks_2": ["TRACKS", {}]}}, "input_order": {"required": ["tracks_1"], "optional": ["tracks_2"]}, "is_input_list": false, "output": ["TRACKS"], "output_is_list": [false], "output_name": ["TRACKS"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanMoveConcatTrack", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wanmove", "category": 
"conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanMoveVisualizeTracks": {"input": {"required": {"images": ["IMAGE", {}], "line_resolution": ["INT", {"default": 24, "min": 1, "max": 1024}], "circle_size": ["INT", {"advanced": true, "default": 12, "min": 1, "max": 128}], "opacity": ["FLOAT", {"default": 0.75, "min": 0.0, "max": 1.0, "step": 0.01}], "line_width": ["INT", {"advanced": true, "default": 16, "min": 1, "max": 128}]}, "optional": {"tracks": ["TRACKS", {}]}}, "input_order": {"required": ["images", "line_resolution", "circle_size", "opacity", "line_width"], "optional": ["tracks"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanMoveVisualizeTracks", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wanmove", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GenerateTracks": {"input": {"required": {"width": ["INT", {"default": 832, "min": 16, "max": 4096, "step": 16}], "height": ["INT", {"default": 480, "min": 16, "max": 4096, "step": 16}], "start_x": ["FLOAT", {"tooltip": "Normalized X coordinate (0-1) for start position.", "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "start_y": ["FLOAT", {"tooltip": "Normalized Y coordinate (0-1) for start position.", "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "end_x": ["FLOAT", {"tooltip": "Normalized X coordinate (0-1) for end position.", "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "end_y": ["FLOAT", {"tooltip": "Normalized Y coordinate (0-1) for end 
position.", "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "num_frames": ["INT", {"default": 81, "min": 1, "max": 1024}], "num_tracks": ["INT", {"default": 5, "min": 1, "max": 100}], "track_spread": ["FLOAT", {"tooltip": "Normalized distance between tracks. Tracks are spread perpendicular to the motion direction.", "default": 0.025, "min": 0.0, "max": 1.0, "step": 0.001}], "bezier": ["BOOLEAN", {"tooltip": "Enable Bezier curve path using the mid point as control point.", "default": false}], "mid_x": ["FLOAT", {"tooltip": "Normalized X control point for Bezier curve. Only used when 'bezier' is enabled.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "mid_y": ["FLOAT", {"tooltip": "Normalized Y control point for Bezier curve. Only used when 'bezier' is enabled.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "interpolation": ["COMBO", {"tooltip": "Controls the timing/speed of movement along the path.", "multiselect": false, "options": ["linear", "ease_in", "ease_out", "ease_in_out", "constant"]}]}, "optional": {"track_mask": ["MASK", {"tooltip": "Optional mask to indicate visible frames."}]}}, "input_order": {"required": ["width", "height", "start_x", "start_y", "end_x", "end_y", "num_frames", "num_tracks", "track_spread", "bezier", "mid_x", "mid_y", "interpolation"], "optional": ["track_mask"]}, "is_input_list": false, "output": ["TRACKS", "INT"], "output_is_list": [false, false], "output_name": ["TRACKS", "track_length"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "GenerateTracks", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_wanmove", "category": "conditioning/video_models", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["motion paths", "camera movement", "trajectory"], "essentials_category": null, "has_intermediate_output": false}, "ImageCompare": {"input": {"required": 
{"compare_view": ["IMAGECOMPARE", {"socketless": true}]}, "optional": {"image_a": ["IMAGE", {}], "image_b": ["IMAGE", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["compare_view"], "optional": ["image_a", "image_b"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "ImageCompare", "display_name": "Image Compare", "description": "Compares two images side by side with a slider.", "python_module": "comfy_extras.nodes_image_compare", "category": "image", "output_node": true, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": "Image Tools", "has_intermediate_output": false}, "TextEncodeZImageOmni": {"input": {"required": {"clip": ["CLIP", {}], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}], "auto_resize_images": ["BOOLEAN", {"advanced": true, "default": true}]}, "optional": {"image_encoder": ["CLIP_VISION", {}], "vae": ["VAE", {}], "image1": ["IMAGE", {}], "image2": ["IMAGE", {}], "image3": ["IMAGE", {}]}}, "input_order": {"required": ["clip", "prompt", "auto_resize_images"], "optional": ["image_encoder", "vae", "image1", "image2", "image3"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextEncodeZImageOmni", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_zimage", "category": "advanced/conditioning", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GLSLShader": {"input": {"required": {"fragment_shader": ["STRING", {"tooltip": "GLSL fragment shader 
source code (GLSL ES 3.00 / WebGL 2.0 compatible)", "default": "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n fragColor0 = texture(u_image0, v_texCoord);\n}\n", "multiline": true}], "size_mode": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Output size: 'from_input' uses first input image dimensions, 'custom' allows manual size", "options": [{"key": "from_input", "inputs": {"required": {}}}, {"key": "custom", "inputs": {"required": {"width": ["INT", {"default": 512, "min": 1, "max": 16384}], "height": ["INT", {"default": 512, "min": 1, "max": 16384}]}}}]}], "images": ["COMFY_AUTOGROW_V3", {"tooltip": "Images are available as u_image0-4 (sampler2D) in the shader code", "template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "image", "min": 1, "max": 5}}], "floats": ["COMFY_AUTOGROW_V3", {"tooltip": "Floats are available as u_float0-19 in the shader code", "template": {"input": {"required": {"float": ["FLOAT", {"default": 0.0, "forceInput": true}]}}, "prefix": "u_float", "min": 0, "max": 20}}], "ints": ["COMFY_AUTOGROW_V3", {"tooltip": "Ints are available as u_int0-19 in the shader code", "template": {"input": {"required": {"int": ["INT", {"default": 0, "forceInput": true}]}}, "prefix": "u_int", "min": 0, "max": 20}}], "bools": ["COMFY_AUTOGROW_V3", {"tooltip": "Booleans are available as u_bool0-9 (bool) in the shader code", "template": {"input": {"required": {"bool": ["BOOLEAN", {"default": false, "forceInput": true}]}}, "prefix": "u_bool", "min": 0, "max": 10}}], "curves": ["COMFY_AUTOGROW_V3", {"tooltip": "Curves are available as u_curve0-3 (sampler2D, 1D LUT) in the shader code. 
Sample with texture(u_curve0, vec2(x, 0.5)).r", "template": {"input": {"required": {"curve": ["CURVE", {"default": {"points": [[0.0, 0.0], [1.0, 1.0]], "interpolation": "monotone_cubic"}, "socketless": true, "forceInput": true}]}}, "prefix": "u_curve", "min": 0, "max": 4}}]}}, "input_order": {"required": ["fragment_shader", "size_mode", "images", "floats", "ints", "bools", "curves"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE0", "IMAGE1", "IMAGE2", "IMAGE3"], "output_tooltips": ["Available via layout(location = 0) out vec4 fragColor0 in the shader code", "Available via layout(location = 1) out vec4 fragColor1 in the shader code", "Available via layout(location = 2) out vec4 fragColor2 in the shader code", "Available via layout(location = 3) out vec4 fragColor3 in the shader code"], "output_matchtypes": null, "name": "GLSLShader", "display_name": "GLSL Shader", "description": "Apply GLSL ES fragment shaders to images. 
u_resolution (vec2) is always available.", "python_module": "comfy_extras.nodes_glsl", "category": "image/shader", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": true}, "LoraLoaderBypass": {"input": {"required": {"model": ["MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}], "clip": ["CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}], "lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"tooltip": "The name of the LoRA."}], "strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}], "strength_clip": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}]}}, "input_order": {"required": ["model", "clip", "lora_name", "strength_model", "strength_clip"]}, "is_input_list": false, "output": ["MODEL", "CLIP"], "output_is_list": [false, false], "output_name": ["MODEL", "CLIP"], "name": "LoraLoaderBypass", "display_name": "Load LoRA (Bypass) (For debugging)", "description": "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. 
Useful for training scenarios.", "python_module": "comfy_extras.nodes_lora_debug", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "The modified CLIP model."], "experimental": true, "search_aliases": []}, "LoraLoaderBypassModelOnly": {"input": {"required": {"model": ["MODEL"], "lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", 
"flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", 
"kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", 
"sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["model", "lora_name", "strength_model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "LoraLoaderBypassModelOnly", "display_name": "Load LoRA (Bypass, Model Only) (for debugging)", "description": "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. 
Useful for training scenarios.", "python_module": "comfy_extras.nodes_lora_debug", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "The modified CLIP model."], "experimental": true, "search_aliases": []}, "TextGenerate": {"input": {"required": {"clip": ["CLIP", {}], "prompt": ["STRING", {"default": "", "multiline": true, "dynamicPrompts": true}], "max_length": ["INT", {"default": 256, "min": 1, "max": 2048}], "sampling_mode": ["COMFY_DYNAMICCOMBO_V3", {"display_name": "Sampling Mode", "options": [{"key": "on", "inputs": {"required": {"temperature": ["FLOAT", {"default": 0.7, "min": 0.01, "max": 2.0, "step": 1e-06}], "top_k": ["INT", {"default": 64, "min": 0, "max": 1000}], "top_p": ["FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01}], "min_p": ["FLOAT", {"default": 0.05, "min": 0.0, "max": 1.0, "step": 0.01}], "repetition_penalty": ["FLOAT", {"default": 1.05, "min": 0.0, "max": 5.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}]}, "optional": {"presence_penalty": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 5.0, "step": 0.01}]}}}, {"key": "off", "inputs": {"required": {}}}]}]}, "optional": {"image": ["IMAGE", {}], "thinking": ["BOOLEAN", {"tooltip": "Operate in thinking mode if the model supports it.", "default": false}]}}, "input_order": {"required": ["clip", "prompt", "max_length", "sampling_mode"], "optional": ["image", "thinking"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["generated_text"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextGenerate", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_textgen", "category": "textgen", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["LLM", "gemma"], "essentials_category": null, 
"has_intermediate_output": false}, "TextGenerateLTX2Prompt": {"input": {"required": {"clip": ["CLIP", {}], "prompt": ["STRING", {"default": "", "multiline": true, "dynamicPrompts": true}], "max_length": ["INT", {"default": 256, "min": 1, "max": 2048}], "sampling_mode": ["COMFY_DYNAMICCOMBO_V3", {"display_name": "Sampling Mode", "options": [{"key": "on", "inputs": {"required": {"temperature": ["FLOAT", {"default": 0.7, "min": 0.01, "max": 2.0, "step": 1e-06}], "top_k": ["INT", {"default": 64, "min": 0, "max": 1000}], "top_p": ["FLOAT", {"default": 0.95, "min": 0.0, "max": 1.0, "step": 0.01}], "min_p": ["FLOAT", {"default": 0.05, "min": 0.0, "max": 1.0, "step": 0.01}], "repetition_penalty": ["FLOAT", {"default": 1.05, "min": 0.0, "max": 5.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}]}, "optional": {"presence_penalty": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 5.0, "step": 0.01}]}}}, {"key": "off", "inputs": {"required": {}}}]}]}, "optional": {"image": ["IMAGE", {}], "thinking": ["BOOLEAN", {"tooltip": "Operate in thinking mode if the model supports it.", "default": false}]}}, "input_order": {"required": ["clip", "prompt", "max_length", "sampling_mode"], "optional": ["image", "thinking"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["generated_text"], "output_tooltips": [null], "output_matchtypes": null, "name": "TextGenerateLTX2Prompt", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_textgen", "category": "textgen", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["prompt enhance", "LLM", "gemma"], "essentials_category": null, "has_intermediate_output": false}, "ColorToRGBInt": {"input": {"required": {"color": ["COLOR", {"default": "#ffffff", "socketless": true}]}}, "input_order": {"required": ["color"]}, "is_input_list": false, "output": ["INT"], 
"output_is_list": [false], "output_name": ["rgb_int"], "output_tooltips": [null], "output_matchtypes": null, "name": "ColorToRGBInt", "display_name": "Color to RGB Int", "description": "Convert a color to a RGB integer value.", "python_module": "comfy_extras.nodes_color", "category": "utils", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "CreateList": {"input": {"required": {"inputs": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"input": ["COMFY_MATCHTYPE_V3", {"template": {"template_id": "type", "allowed_types": "*"}}]}}, "prefix": "input", "min": 1, "max": 10}}]}}, "input_order": {"required": ["inputs"]}, "is_input_list": true, "output": ["COMFY_MATCHTYPE_V3"], "output_is_list": [true], "output_name": ["list"], "output_tooltips": [null], "output_matchtypes": ["type"], "name": "CreateList", "display_name": "Create List", "description": "", "python_module": "comfy_extras.nodes_toolkit", "category": "logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["Image Iterator", "Text Iterator", "Iterator"], "essentials_category": null, "has_intermediate_output": false}, "NAGuidance": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model to apply NAG to."}], "nag_scale": ["FLOAT", {"tooltip": "The guidance scale factor. Higher values push further from the negative prompt.", "default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1}], "nag_alpha": ["FLOAT", {"tooltip": "Blending factor for the normalized attention. 
1.0 is full replacement, 0.0 is no effect.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "nag_tau": ["FLOAT", {"default": 1.5, "min": 1.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "nag_scale", "nag_alpha", "nag_tau"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "output_tooltips": ["The patched model with NAG enabled."], "output_matchtypes": null, "name": "NAGuidance", "display_name": "Normalized Attention Guidance", "description": "Applies Normalized Attention Guidance to models, enabling negative prompts on distilled/schnell models.", "python_module": "comfy_extras.nodes_nag", "category": "advanced/guidance", "output_node": false, "deprecated": false, "experimental": true, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "SDPoseKeypointExtractor": {"input": {"required": {"model": ["MODEL", {}], "vae": ["VAE", {}], "image": ["IMAGE", {}], "batch_size": ["INT", {"default": 16, "min": 1, "max": 10000, "step": 1}]}, "optional": {"bboxes": ["BOUNDING_BOX", {"tooltip": "Optional bounding boxes for more accurate detections. 
Required for multi-person detection.", "default": {"x": 0, "y": 0, "width": 512, "height": 512}, "socketless": true, "forceInput": true}]}}, "input_order": {"required": ["model", "vae", "image", "batch_size"], "optional": ["bboxes"]}, "is_input_list": false, "output": ["POSE_KEYPOINT"], "output_is_list": [false], "output_name": ["keypoints"], "output_tooltips": ["Keypoints in OpenPose frame format (canvas_width, canvas_height, people)"], "output_matchtypes": null, "name": "SDPoseKeypointExtractor", "display_name": null, "description": "Extract pose keypoints from images using the SDPose model: https://huggingface.co/Comfy-Org/SDPose/tree/main/checkpoints", "python_module": "comfy_extras.nodes_sdpose", "category": "image/preprocessors", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["openpose", "pose detection", "preprocessor", "keypoints", "sdpose"], "essentials_category": null, "has_intermediate_output": false}, "SDPoseDrawKeypoints": {"input": {"required": {"keypoints": ["POSE_KEYPOINT", {}], "draw_body": ["BOOLEAN", {"default": true}], "draw_hands": ["BOOLEAN", {"default": true}], "draw_face": ["BOOLEAN", {"default": true}], "draw_feet": ["BOOLEAN", {"default": false}], "stick_width": ["INT", {"default": 4, "min": 1, "max": 10, "step": 1}], "face_point_size": ["INT", {"default": 3, "min": 1, "max": 10, "step": 1}], "score_threshold": ["FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["keypoints", "draw_body", "draw_hands", "draw_face", "draw_feet", "stick_width", "face_point_size", "score_threshold"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "SDPoseDrawKeypoints", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_sdpose", "category": "image/preprocessors", "output_node": 
false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["openpose", "pose detection", "preprocessor", "keypoints", "pose"], "essentials_category": null, "has_intermediate_output": false}, "SDPoseFaceBBoxes": {"input": {"required": {"keypoints": ["POSE_KEYPOINT", {}], "scale": ["FLOAT", {"tooltip": "Multiplier for the bounding box area around each detected face.", "default": 1.5, "min": 1.0, "max": 10.0, "step": 0.1}], "force_square": ["BOOLEAN", {"tooltip": "Expand the shorter bbox axis so the crop region is always square.", "default": true}]}}, "input_order": {"required": ["keypoints", "scale", "force_square"]}, "is_input_list": false, "output": ["BOUNDING_BOX"], "output_is_list": [false], "output_name": ["bboxes"], "output_tooltips": ["Face bounding boxes per frame, compatible with SDPoseKeypointExtractor bboxes input."], "output_matchtypes": null, "name": "SDPoseFaceBBoxes", "display_name": null, "description": "", "python_module": "comfy_extras.nodes_sdpose", "category": "image/preprocessors", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["face bbox", "face bounding box", "pose", "keypoints"], "essentials_category": null, "has_intermediate_output": false}, "CropByBBoxes": {"input": {"required": {"image": ["IMAGE", {}], "bboxes": ["BOUNDING_BOX", {"default": {"x": 0, "y": 0, "width": 512, "height": 512}, "socketless": true, "forceInput": true}], "output_width": ["INT", {"tooltip": "Width each crop is resized to.", "default": 512, "min": 64, "max": 4096, "step": 8}], "output_height": ["INT", {"tooltip": "Height each crop is resized to.", "default": 512, "min": 64, "max": 4096, "step": 8}], "padding": ["INT", {"tooltip": "Extra padding in pixels added on each side of the bbox before cropping.", "default": 0, "min": 0, "max": 1024, "step": 1}], "keep_aspect": ["COMBO", {"tooltip": "Whether to 
stretch the crop to fit the output size, or pad with black pixels to preserve aspect ratio.", "default": "stretch", "multiselect": false, "options": ["stretch", "pad"]}]}}, "input_order": {"required": ["image", "bboxes", "output_width", "output_height", "padding", "keep_aspect"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": ["All crops stacked into a single image batch."], "output_matchtypes": null, "name": "CropByBBoxes", "display_name": null, "description": "Crop and resize regions from the input image batch based on provided bounding boxes.", "python_module": "comfy_extras.nodes_sdpose", "category": "image/preprocessors", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["crop", "face crop", "bbox crop", "pose", "bounding box"], "essentials_category": null, "has_intermediate_output": false}, "ComfyMathExpression": {"input": {"required": {"expression": ["STRING", {"default": "a + b", "multiline": true}], "values": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"value": ["FLOAT,INT", {}]}}, "names": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"], "min": 1}}]}}, "input_order": {"required": ["expression", "values"]}, "is_input_list": false, "output": ["FLOAT", "INT"], "output_is_list": [false, false], "output_name": ["FLOAT", "INT"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "ComfyMathExpression", "display_name": "Math Expression", "description": "", "python_module": "comfy_extras.nodes_math", "category": "math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["expression", "formula", "calculate", "calculator", "eval", "math"], "essentials_category": null, "has_intermediate_output": 
false}, "ComfyNumberConvert": {"input": {"required": {"value": ["INT,FLOAT,STRING,BOOLEAN", {"display_name": "value"}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["FLOAT", "INT"], "output_is_list": [false, false], "output_name": ["FLOAT", "INT"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "ComfyNumberConvert", "display_name": "Number Convert", "description": "", "python_module": "comfy_extras.nodes_number_convert", "category": "math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["int to float", "float to int", "number convert", "int2float", "float2int", "cast", "parse number", "string to number", "bool to int"], "essentials_category": null, "has_intermediate_output": false}, "Painter": {"input": {"required": {"mask": ["STRING", {"widgetType": "PAINTER", "image_upload": true, "default": "", "socketless": true, "multiline": false}], "width": ["INT", {"hidden": true, "default": 512, "socketless": true, "min": 64, "max": 4096, "step": 64}], "height": ["INT", {"hidden": true, "default": 512, "socketless": true, "min": 64, "max": 4096, "step": 64}], "bg_color": ["COLOR", {"default": "#000000", "socketless": true}]}, "optional": {"image": ["IMAGE", {"tooltip": "Optional base image to paint over"}]}}, "input_order": {"required": ["mask", "width", "height", "bg_color"], "optional": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Painter", "display_name": "Painter", "description": "", "python_module": "comfy_extras.nodes_painter", "category": "image", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": true}, 
"CurveEditor": {"input": {"required": {"curve": ["CURVE", {"default": {"points": [[0.0, 0.0], [1.0, 1.0]], "interpolation": "monotone_cubic"}, "socketless": true}]}, "optional": {"histogram": ["HISTOGRAM", {}]}}, "input_order": {"required": ["curve"], "optional": ["histogram"]}, "is_input_list": false, "output": ["CURVE"], "output_is_list": [false], "output_name": ["curve"], "output_tooltips": [null], "output_matchtypes": null, "name": "CurveEditor", "display_name": "Curve Editor", "description": "", "python_module": "comfy_extras.nodes_curve", "category": "utils", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RTDETR_detect": {"input": {"required": {"model": ["MODEL", {"display_name": "model"}], "image": ["IMAGE", {"display_name": "image"}], "threshold": ["FLOAT", {"display_name": "threshold", "default": 0.5}], "class_name": ["COMBO", {"tooltip": "Filter detections by class. 
Set to 'all' to disable filtering.", "default": "all", "multiselect": false, "options": ["all", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]}], "max_detections": ["INT", {"display_name": "max_detections", "tooltip": "Maximum number of detections to return per image. 
In order of descending confidence score.", "default": 100}]}}, "input_order": {"required": ["model", "image", "threshold", "class_name", "max_detections"]}, "is_input_list": false, "output": ["BOUNDING_BOX"], "output_is_list": [false], "output_name": ["bboxes"], "output_tooltips": [null], "output_matchtypes": null, "name": "RTDETR_detect", "display_name": "RT-DETR Detect", "description": "", "python_module": "comfy_extras.nodes_rtdetr", "category": "detection/", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["bbox", "bounding box", "object detection", "coco"], "essentials_category": null, "has_intermediate_output": false}, "DrawBBoxes": {"input": {"required": {"bboxes": ["BOUNDING_BOX", {"default": {"x": 0, "y": 0, "width": 512, "height": 512}, "socketless": true, "forceInput": true}]}, "optional": {"image": ["IMAGE", {}]}}, "input_order": {"required": ["bboxes"], "optional": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["out_image"], "output_tooltips": [null], "output_matchtypes": null, "name": "DrawBBoxes", "display_name": "Draw BBoxes", "description": "", "python_module": "comfy_extras.nodes_rtdetr", "category": "detection/", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": ["bbox", "bounding box", "object detection", "rt_detr", "visualize detections", "coco"], "essentials_category": null, "has_intermediate_output": false}, "FluxProUltraImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", "advanced": true, "default": false}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "aspect_ratio": ["STRING", {"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", "default": "16:9", "multiline": false}], "raw": ["BOOLEAN", {"tooltip": "When True, generate less processed, more natural-looking images.", "default": false}]}, "optional": {"image_prompt": ["IMAGE", {}], "image_prompt_strength": ["FLOAT", {"tooltip": "Blend between the prompt and the image prompt.", "default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "prompt_upsampling", "seed", "aspect_ratio", "raw"], "optional": ["image_prompt", "image_prompt_strength"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxProUltraImageNode", "display_name": "Flux 1.1 [pro] Ultra Image", "description": "Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.06}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxKontextProImageNode": {"input": {"required": {"prompt": ["STRING", 
{"tooltip": "Prompt for the image generation - specify what and how to edit.", "default": "", "multiline": true}], "aspect_ratio": ["STRING", {"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", "default": "16:9", "multiline": false}], "guidance": ["FLOAT", {"tooltip": "Guidance strength for the image generation process", "default": 3.0, "min": 0.1, "max": 99.0, "step": 0.1}], "steps": ["INT", {"tooltip": "Number of steps for the image generation process", "default": 50, "min": 1, "max": 150}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 1234, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", "advanced": true, "default": false}]}, "optional": {"input_image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio", "guidance", "steps", "seed", "prompt_upsampling"], "optional": ["input_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxKontextProImageNode", "display_name": "Flux.1 Kontext [pro] Image", "description": "Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, 
"FluxKontextMaxImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation - specify what and how to edit.", "default": "", "multiline": true}], "aspect_ratio": ["STRING", {"tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.", "default": "16:9", "multiline": false}], "guidance": ["FLOAT", {"tooltip": "Guidance strength for the image generation process", "default": 3.0, "min": 0.1, "max": 99.0, "step": 0.1}], "steps": ["INT", {"tooltip": "Number of steps for the image generation process", "default": 50, "min": 1, "max": 150}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 1234, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", "advanced": true, "default": false}]}, "optional": {"input_image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio", "guidance", "steps", "seed", "prompt_upsampling"], "optional": ["input_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxKontextMaxImageNode", "display_name": "Flux.1 Kontext [max] Image", "description": "Edits images using Flux.1 Kontext [max] via api based on prompt and aspect ratio.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": null, "search_aliases": null, 
"essentials_category": null, "has_intermediate_output": false}, "FluxProExpandNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", "advanced": true, "default": false}], "top": ["INT", {"tooltip": "Number of pixels to expand at the top of the image", "default": 0, "min": 0, "max": 2048}], "bottom": ["INT", {"tooltip": "Number of pixels to expand at the bottom of the image", "default": 0, "min": 0, "max": 2048}], "left": ["INT", {"tooltip": "Number of pixels to expand at the left of the image", "default": 0, "min": 0, "max": 2048}], "right": ["INT", {"tooltip": "Number of pixels to expand at the right of the image", "default": 0, "min": 0, "max": 2048}], "guidance": ["FLOAT", {"tooltip": "Guidance strength for the image generation process", "default": 60, "min": 1.5, "max": 100}], "steps": ["INT", {"tooltip": "Number of steps for the image generation process", "default": 50, "min": 15, "max": 50}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "prompt_upsampling", "top", "bottom", "left", "right", "guidance", "steps", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxProExpandNode", "display_name": "Flux.1 Expand Image", 
"description": "Outpaints image based on prompt.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.05}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "FluxProFillNode": {"input": {"required": {"image": ["IMAGE", {}], "mask": ["MASK", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).", "advanced": true, "default": false}], "guidance": ["FLOAT", {"tooltip": "Guidance strength for the image generation process", "default": 60, "min": 1.5, "max": 100}], "steps": ["INT", {"tooltip": "Number of steps for the image generation process", "default": 50, "min": 15, "max": 50}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "mask", "prompt", "prompt_upsampling", "guidance", "steps", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "FluxProFillNode", "display_name": "Flux.1 Fill Image", "description": "Inpaints image based on mask and prompt.", "python_module": 
"comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.05}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Flux2ProImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation or edit", "default": "", "multiline": true}], "width": ["INT", {"default": 1024, "min": 256, "max": 2048, "step": 32}], "height": ["INT", {"default": 768, "min": 256, "max": 2048, "step": 32}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.", "advanced": true, "default": true}]}, "optional": {"images": ["IMAGE", {"tooltip": "Up to 9 images to be used as references."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "width", "height", "seed", "prompt_upsampling"], "optional": ["images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "Flux2ProImageNode", "display_name": "Flux.2 [pro] Image", "description": "Generates images synchronously based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": 
"jsonata", "depends_on": {"widgets": [{"name": "width", "type": "INT"}, {"name": "height", "type": "INT"}], "inputs": ["images"], "input_groups": []}, "expr": "\n (\n $MP := 1024 * 1024;\n $outMP := $max([1, $floor(((widgets.width * widgets.height) + $MP - 1) / $MP)]);\n $outputCost := 0.03 + 0.015 * ($outMP - 1);\n inputs.images.connected\n ? {\n \"type\":\"range_usd\",\n \"min_usd\": $outputCost + 0.015,\n \"max_usd\": $outputCost + 0.12,\n \"format\": { \"approximate\": true }\n }\n : {\"type\":\"usd\",\"usd\": $outputCost}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Flux2MaxImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation or edit", "default": "", "multiline": true}], "width": ["INT", {"default": 1024, "min": 256, "max": 2048, "step": 32}], "height": ["INT", {"default": 768, "min": 256, "max": 2048, "step": 32}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "prompt_upsampling": ["BOOLEAN", {"tooltip": "Whether to perform upsampling on the prompt. 
If active, automatically modifies the prompt for more creative generation.", "advanced": true, "default": true}]}, "optional": {"images": ["IMAGE", {"tooltip": "Up to 9 images to be used as references."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "width", "height", "seed", "prompt_upsampling"], "optional": ["images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "Flux2MaxImageNode", "display_name": "Flux.2 [max] Image", "description": "Generates images synchronously based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_bfl", "category": "api node/image/BFL", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "width", "type": "INT"}, {"name": "height", "type": "INT"}], "inputs": ["images"], "input_groups": []}, "expr": "\n (\n $MP := 1024 * 1024;\n $outMP := $max([1, $floor(((widgets.width * widgets.height) + $MP - 1) / $MP)]);\n $outputCost := 0.07 + 0.03 * ($outMP - 1);\n\n inputs.images.connected\n ? 
{\n \"type\":\"range_usd\",\n \"min_usd\": $outputCost + 0.03,\n \"max_usd\": $outputCost + 0.24,\n \"format\": { \"approximate\": true }\n }\n : {\"type\":\"usd\",\"usd\": $outputCost}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "BriaImageEditNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["FIBO"]}], "image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Instruction to edit image", "default": "", "multiline": true}], "negative_prompt": ["STRING", {"default": "", "multiline": true}], "structured_prompt": ["STRING", {"tooltip": "A string containing the structured edit prompt in JSON format. Use this instead of usual prompt for precise, programmatic control.", "default": "", "multiline": true}], "seed": ["INT", {"default": 1, "min": 1, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "guidance_scale": ["FLOAT", {"tooltip": "Higher value makes the image follow the prompt more closely.", "default": 3, "min": 3, "max": 5, "step": 0.01, "display": "number"}], "steps": ["INT", {"default": 50, "min": 20, "max": 50, "step": 1, "display": "number"}], "moderation": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Moderation settings", "options": [{"key": "false", "inputs": {"required": {}}}, {"key": "true", "inputs": {"required": {"prompt_content_moderation": ["BOOLEAN", {"default": false}], "visual_input_moderation": ["BOOLEAN", {"default": false}], "visual_output_moderation": ["BOOLEAN", {"default": true}]}}}]}]}, "optional": {"mask": ["MASK", {"tooltip": "If omitted, the edit applies to the entire image."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt", "negative_prompt", "structured_prompt", "seed", "guidance_scale", "steps", "moderation"], "optional": ["mask"], "hidden": ["auth_token_comfy_org", 
"api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "STRING"], "output_is_list": [false, false], "output_name": ["IMAGE", "structured_prompt"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "BriaImageEditNode", "display_name": "Bria FIBO Image Edit", "description": "Edit images using Bria latest model", "python_module": "comfy_api_nodes.nodes_bria", "category": "api node/image/Bria", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.04}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "BriaRemoveImageBackground": {"input": {"required": {"image": ["IMAGE", {}], "moderation": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Moderation settings", "options": [{"key": "false", "inputs": {"required": {}}}, {"key": "true", "inputs": {"required": {"visual_input_moderation": ["BOOLEAN", {"default": false}], "visual_output_moderation": ["BOOLEAN", {"default": true}]}}}]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "moderation", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "BriaRemoveImageBackground", "display_name": "Bria Remove Image Background", "description": "Remove the background from an image using Bria RMBG 2.0.", "python_module": 
"comfy_api_nodes.nodes_bria", "category": "api node/image/Bria", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.018}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "BriaRemoveVideoBackground": {"input": {"required": {"video": ["VIDEO", {}], "background_color": ["COMBO", {"tooltip": "Background color for the output video.", "multiselect": false, "options": ["Black", "White", "Gray", "Red", "Green", "Blue", "Yellow", "Cyan", "Magenta", "Orange"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["video", "background_color", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "BriaRemoveVideoBackground", "display_name": "Bria Remove Video Background", "description": "Remove the background from a video using Bria. 
", "python_module": "comfy_api_nodes.nodes_bria", "category": "api node/video/Bria", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.14,\"format\":{\"suffix\":\"/second\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceImageNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["seedream-3-0-t2i-250415"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the image", "multiline": true}], "size_preset": ["COMBO", {"tooltip": "Pick a recommended size. Select Custom to use the width and height below", "multiselect": false, "options": ["1024x1024 (1:1)", "864x1152 (3:4)", "1152x864 (4:3)", "1280x720 (16:9)", "720x1280 (9:16)", "832x1248 (2:3)", "1248x832 (3:2)", "1512x648 (21:9)", "2048x2048 (1:1)", "Custom"]}], "width": ["INT", {"tooltip": "Custom width for image. Value is working only if `size_preset` is set to `Custom`", "default": 1024, "min": 512, "max": 2048, "step": 64}], "height": ["INT", {"tooltip": "Custom height for image. 
Value is used only if `size_preset` is set to `Custom`", "default": 1024, "min": 512, "max": 2048, "step": 64}]}, "optional": {"seed": ["INT", {"tooltip": "Seed to use for generation", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "guidance_scale": ["FLOAT", {"tooltip": "Higher value makes the image follow the prompt more closely", "default": 2.5, "min": 1.0, "max": 10.0, "step": 0.01, "display": "number"}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the image", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "size_preset", "width", "height"], "optional": ["seed", "guidance_scale", "watermark"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceImageNode", "display_name": "ByteDance Image", "description": "Generate images using ByteDance models via api based on prompt", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/image/ByteDance", "output_node": false, "deprecated": true, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.03}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceSeedreamNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["seedream 5.0 lite", "seedream-4-5-251128", "seedream-4-0-250828"]}], "prompt": ["STRING", {"tooltip": "Text prompt for creating or editing an image.", "default": "", "multiline": true}], "size_preset": ["COMBO", 
{"tooltip": "Pick a recommended size. Select Custom to use the width and height below.", "multiselect": false, "options": ["2048x2048 (1:1)", "2304x1728 (4:3)", "1728x2304 (3:4)", "2560x1440 (16:9)", "1440x2560 (9:16)", "2496x1664 (3:2)", "1664x2496 (2:3)", "3024x1296 (21:9)", "3072x3072 (1:1)", "4096x4096 (1:1)", "Custom"]}]}, "optional": {"image": ["IMAGE", {"tooltip": "Input image(s) for image-to-image generation. Reference image(s) for single or multi-reference generation."}], "width": ["INT", {"tooltip": "Custom width for image. Value is working only if `size_preset` is set to `Custom`", "default": 2048, "min": 1024, "max": 6240, "step": 2}], "height": ["INT", {"tooltip": "Custom height for image. Value is working only if `size_preset` is set to `Custom`", "default": 2048, "min": 1024, "max": 4992, "step": 2}], "sequential_image_generation": ["COMBO", {"tooltip": "Group image generation mode. 'disabled' generates a single image. 'auto' lets the model decide whether to generate multiple related images (e.g., story scenes, character variations).", "multiselect": false, "options": ["disabled", "auto"]}], "max_images": ["INT", {"tooltip": "Maximum number of images to generate when sequential_image_generation='auto'. 
Total images (input + generated) cannot exceed 15.", "default": 1, "min": 1, "max": 15, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the image.", "advanced": true, "default": false}], "fail_on_partial": ["BOOLEAN", {"tooltip": "If enabled, abort execution if any requested images are missing or return an error.", "advanced": true, "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "size_preset"], "optional": ["image", "width", "height", "sequential_image_generation", "max_images", "seed", "watermark", "fail_on_partial"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceSeedreamNode", "display_name": "ByteDance Seedream 4.5 & 5.0", "description": "Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/image/ByteDance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $price := $contains(widgets.model, \"5.0 lite\") ? 0.035 :\n $contains(widgets.model, \"4-5\") ? 
0.04 : 0.03;\n {\n \"type\":\"usd\",\n \"usd\": $price,\n \"format\": { \"suffix\":\" x images/Run\", \"approximate\": true }\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceTextToVideoNode": {"input": {"required": {"model": ["COMBO", {"default": "seedance-1-0-pro-fast-251015", "multiselect": false, "options": ["seedance-1-5-pro-251215", "seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the video.", "multiline": true}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p", "1080p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["16:9", "4:3", "1:1", "3:4", "9:16", "21:9"]}], "duration": ["INT", {"tooltip": "The duration of the output video in seconds.", "default": 5, "min": 3, "max": 12, "step": 1, "display": "slider"}]}, "optional": {"seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "camera_fixed": ["BOOLEAN", {"tooltip": "Specifies whether to fix the camera. 
The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect.", "advanced": true, "default": false}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the video.", "advanced": true, "default": false}], "generate_audio": ["BOOLEAN", {"tooltip": "This parameter is ignored for any model except seedance-1-5-pro.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "resolution", "aspect_ratio", "duration"], "optional": ["seed", "camera_fixed", "watermark", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceTextToVideoNode", "display_name": "ByteDance Text to Video", "description": "Generate video using ByteDance models via api based on prompt", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/video/ByteDance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $priceByModel := {\n \"seedance-1-5-pro\": {\n \"480p\":[0.12,0.12],\n \"720p\":[0.26,0.26],\n \"1080p\":[0.58,0.59]\n },\n \"seedance-1-0-pro\": {\n \"480p\":[0.23,0.24],\n \"720p\":[0.51,0.56],\n \"1080p\":[1.18,1.22]\n },\n \"seedance-1-0-pro-fast\": {\n \"480p\":[0.09,0.1],\n \"720p\":[0.21,0.23],\n \"1080p\":[0.47,0.49]\n },\n \"seedance-1-0-lite\": {\n \"480p\":[0.17,0.18],\n \"720p\":[0.37,0.41],\n 
\"1080p\":[0.85,0.88]\n }\n };\n $model := widgets.model;\n $modelKey :=\n $contains($model, \"seedance-1-5-pro\") ? \"seedance-1-5-pro\" :\n $contains($model, \"seedance-1-0-pro-fast\") ? \"seedance-1-0-pro-fast\" :\n $contains($model, \"seedance-1-0-pro\") ? \"seedance-1-0-pro\" :\n \"seedance-1-0-lite\";\n $resolution := widgets.resolution;\n $resKey :=\n $contains($resolution, \"1080\") ? \"1080p\" :\n $contains($resolution, \"720\") ? \"720p\" :\n \"480p\";\n $modelPrices := $lookup($priceByModel, $modelKey);\n $baseRange := $lookup($modelPrices, $resKey);\n $min10s := $baseRange[0];\n $max10s := $baseRange[1];\n $scale := widgets.duration / 10;\n $audioMultiplier := ($modelKey = \"seedance-1-5-pro\" and widgets.generate_audio) ? 2 : 1;\n $minCost := $min10s * $scale * $audioMultiplier;\n $maxCost := $max10s * $scale * $audioMultiplier;\n ($minCost = $maxCost)\n ? {\"type\":\"usd\",\"usd\": $minCost, \"format\": { \"approximate\": true }}\n : {\"type\":\"range_usd\",\"min_usd\": $minCost, \"max_usd\": $maxCost, \"format\": { \"approximate\": true }}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceImageToVideoNode": {"input": {"required": {"model": ["COMBO", {"default": "seedance-1-0-pro-fast-251015", "multiselect": false, "options": ["seedance-1-5-pro-251215", "seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428", "seedance-1-0-pro-fast-251015"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the video.", "multiline": true}], "image": ["IMAGE", {"tooltip": "First frame to be used for the video."}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p", "1080p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"]}], "duration": ["INT", {"tooltip": "The duration of the output video in 
seconds.", "default": 5, "min": 3, "max": 12, "step": 1, "display": "slider"}]}, "optional": {"seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "camera_fixed": ["BOOLEAN", {"tooltip": "Specifies whether to fix the camera. The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect.", "advanced": true, "default": false}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the video.", "advanced": true, "default": false}], "generate_audio": ["BOOLEAN", {"tooltip": "This parameter is ignored for any model except seedance-1-5-pro.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "image", "resolution", "aspect_ratio", "duration"], "optional": ["seed", "camera_fixed", "watermark", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceImageToVideoNode", "display_name": "ByteDance Image to Video", "description": "Generate video using ByteDance models via api based on image and prompt", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/video/ByteDance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $priceByModel := {\n \"seedance-1-5-pro\": {\n 
\"480p\":[0.12,0.12],\n \"720p\":[0.26,0.26],\n \"1080p\":[0.58,0.59]\n },\n \"seedance-1-0-pro\": {\n \"480p\":[0.23,0.24],\n \"720p\":[0.51,0.56],\n \"1080p\":[1.18,1.22]\n },\n \"seedance-1-0-pro-fast\": {\n \"480p\":[0.09,0.1],\n \"720p\":[0.21,0.23],\n \"1080p\":[0.47,0.49]\n },\n \"seedance-1-0-lite\": {\n \"480p\":[0.17,0.18],\n \"720p\":[0.37,0.41],\n \"1080p\":[0.85,0.88]\n }\n };\n $model := widgets.model;\n $modelKey :=\n $contains($model, \"seedance-1-5-pro\") ? \"seedance-1-5-pro\" :\n $contains($model, \"seedance-1-0-pro-fast\") ? \"seedance-1-0-pro-fast\" :\n $contains($model, \"seedance-1-0-pro\") ? \"seedance-1-0-pro\" :\n \"seedance-1-0-lite\";\n $resolution := widgets.resolution;\n $resKey :=\n $contains($resolution, \"1080\") ? \"1080p\" :\n $contains($resolution, \"720\") ? \"720p\" :\n \"480p\";\n $modelPrices := $lookup($priceByModel, $modelKey);\n $baseRange := $lookup($modelPrices, $resKey);\n $min10s := $baseRange[0];\n $max10s := $baseRange[1];\n $scale := widgets.duration / 10;\n $audioMultiplier := ($modelKey = \"seedance-1-5-pro\" and widgets.generate_audio) ? 2 : 1;\n $minCost := $min10s * $scale * $audioMultiplier;\n $maxCost := $max10s * $scale * $audioMultiplier;\n ($minCost = $maxCost)\n ? 
{\"type\":\"usd\",\"usd\": $minCost, \"format\": { \"approximate\": true }}\n : {\"type\":\"range_usd\",\"min_usd\": $minCost, \"max_usd\": $maxCost, \"format\": { \"approximate\": true }}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceFirstLastFrameNode": {"input": {"required": {"model": ["COMBO", {"default": "seedance-1-0-lite-i2v-250428", "multiselect": false, "options": ["seedance-1-5-pro-251215", "seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the video.", "multiline": true}], "first_frame": ["IMAGE", {"tooltip": "First frame to be used for the video."}], "last_frame": ["IMAGE", {"tooltip": "Last frame to be used for the video."}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p", "1080p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"]}], "duration": ["INT", {"tooltip": "The duration of the output video in seconds.", "default": 5, "min": 3, "max": 12, "step": 1, "display": "slider"}]}, "optional": {"seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "camera_fixed": ["BOOLEAN", {"tooltip": "Specifies whether to fix the camera. 
The platform appends an instruction to fix the camera to your prompt, but does not guarantee the actual effect.", "advanced": true, "default": false}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the video.", "advanced": true, "default": false}], "generate_audio": ["BOOLEAN", {"tooltip": "This parameter is ignored for any model except seedance-1-5-pro.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "first_frame", "last_frame", "resolution", "aspect_ratio", "duration"], "optional": ["seed", "camera_fixed", "watermark", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceFirstLastFrameNode", "display_name": "ByteDance First-Last-Frame to Video", "description": "Generate video using prompt and first and last frames.", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/video/ByteDance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $priceByModel := {\n \"seedance-1-5-pro\": {\n \"480p\":[0.12,0.12],\n \"720p\":[0.26,0.26],\n \"1080p\":[0.58,0.59]\n },\n \"seedance-1-0-pro\": {\n \"480p\":[0.23,0.24],\n \"720p\":[0.51,0.56],\n \"1080p\":[1.18,1.22]\n },\n \"seedance-1-0-pro-fast\": {\n \"480p\":[0.09,0.1],\n \"720p\":[0.21,0.23],\n \"1080p\":[0.47,0.49]\n },\n \"seedance-1-0-lite\": {\n 
\"480p\":[0.17,0.18],\n \"720p\":[0.37,0.41],\n \"1080p\":[0.85,0.88]\n }\n };\n $model := widgets.model;\n $modelKey :=\n $contains($model, \"seedance-1-5-pro\") ? \"seedance-1-5-pro\" :\n $contains($model, \"seedance-1-0-pro-fast\") ? \"seedance-1-0-pro-fast\" :\n $contains($model, \"seedance-1-0-pro\") ? \"seedance-1-0-pro\" :\n \"seedance-1-0-lite\";\n $resolution := widgets.resolution;\n $resKey :=\n $contains($resolution, \"1080\") ? \"1080p\" :\n $contains($resolution, \"720\") ? \"720p\" :\n \"480p\";\n $modelPrices := $lookup($priceByModel, $modelKey);\n $baseRange := $lookup($modelPrices, $resKey);\n $min10s := $baseRange[0];\n $max10s := $baseRange[1];\n $scale := widgets.duration / 10;\n $audioMultiplier := ($modelKey = \"seedance-1-5-pro\" and widgets.generate_audio) ? 2 : 1;\n $minCost := $min10s * $scale * $audioMultiplier;\n $maxCost := $max10s * $scale * $audioMultiplier;\n ($minCost = $maxCost)\n ? {\"type\":\"usd\",\"usd\": $minCost, \"format\": { \"approximate\": true }}\n : {\"type\":\"range_usd\",\"min_usd\": $minCost, \"max_usd\": $maxCost, \"format\": { \"approximate\": true }}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ByteDanceImageReferenceNode": {"input": {"required": {"model": ["COMBO", {"default": "seedance-1-0-lite-i2v-250428", "multiselect": false, "options": ["seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the video.", "multiline": true}], "images": ["IMAGE", {"tooltip": "One to four images."}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"]}], "duration": ["INT", {"tooltip": "The duration of the output video in seconds.", "default": 5, "min": 3, 
"max": 12, "step": 1, "display": "slider"}]}, "optional": {"seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an \"AI generated\" watermark to the video.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "images", "resolution", "aspect_ratio", "duration"], "optional": ["seed", "watermark"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ByteDanceImageReferenceNode", "display_name": "ByteDance Reference Images to Video", "description": "Generate video using prompt and reference images.", "python_module": "comfy_api_nodes.nodes_bytedance", "category": "api node/video/ByteDance", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $priceByModel := {\n \"seedance-1-0-pro\": {\n \"480p\":[0.23,0.24],\n \"720p\":[0.51,0.56]\n },\n \"seedance-1-0-lite\": {\n \"480p\":[0.17,0.18],\n \"720p\":[0.37,0.41]\n }\n };\n $model := widgets.model;\n $modelKey :=\n $contains($model, \"seedance-1-0-pro\") ? \"seedance-1-0-pro\" :\n \"seedance-1-0-lite\";\n $resolution := widgets.resolution;\n $resKey :=\n $contains($resolution, \"720\") ? 
\"720p\" :\n \"480p\";\n $modelPrices := $lookup($priceByModel, $modelKey);\n $baseRange := $lookup($modelPrices, $resKey);\n $min10s := $baseRange[0];\n $max10s := $baseRange[1];\n $scale := widgets.duration / 10;\n $minCost := $min10s * $scale;\n $maxCost := $max10s * $scale;\n ($minCost = $maxCost)\n ? {\"type\":\"usd\",\"usd\": $minCost}\n : {\"type\":\"range_usd\",\"min_usd\": $minCost, \"max_usd\": $maxCost}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsSpeechToText": {"input": {"required": {"audio": ["AUDIO", {"tooltip": "Audio to transcribe."}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for transcription.", "options": [{"key": "scribe_v2", "inputs": {"required": {"tag_audio_events": ["BOOLEAN", {"tooltip": "Annotate sounds like (laughter), (music), etc. in transcript.", "default": false}], "diarize": ["BOOLEAN", {"tooltip": "Annotate which speaker is talking.", "default": false}], "diarization_threshold": ["FLOAT", {"tooltip": "Speaker separation sensitivity. Lower values are more sensitive to speaker changes.", "default": 0.22, "min": 0.1, "max": 0.4, "step": 0.01, "display": "slider"}], "temperature": ["FLOAT", {"tooltip": "Randomness control. 0.0 uses model default. Higher values increase randomness.", "default": 0.0, "min": 0.0, "max": 2.0, "step": 0.01, "display": "slider"}], "timestamps_granularity": ["COMBO", {"tooltip": "Timing precision for transcript words.", "default": "word", "multiselect": false, "options": ["word", "character", "none"]}]}}}]}], "language_code": ["STRING", {"tooltip": "ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). Leave empty for automatic detection.", "default": "", "multiline": false}], "num_speakers": ["INT", {"tooltip": "Maximum number of speakers to predict. 
Set to 0 for automatic detection.", "default": 0, "min": 0, "max": 32, "display": "slider"}], "seed": ["INT", {"tooltip": "Seed for reproducibility (determinism not guaranteed).", "default": 1, "min": 0, "max": 2147483647}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["audio", "model", "language_code", "num_speakers", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["text", "language_code", "words_json"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "ElevenLabsSpeechToText", "display_name": "ElevenLabs Speech to Text", "description": "Transcribe audio to text. Supports automatic language detection, speaker diarization, and audio event tagging.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.0073,\"format\":{\"approximate\":true,\"suffix\":\"/minute\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsVoiceSelector": {"input": {"required": {"voice": ["COMBO", {"tooltip": "Choose a voice from the predefined ElevenLabs voices.", "multiselect": false, "options": ["Roger (male, american)", "Sarah (female, american)", "Laura (female, american)", "Charlie (male, australian)", "George (male, british)", "Callum (male, american)", "River (neutral, american)", "Harry (male, american)", "Liam (male, american)", "Alice (female, british)", "Matilda (female, american)", "Will (male, american)", "Jessica (female, american)", "Eric (male, 
american)", "Bella (female, american)", "Chris (male, american)", "Brian (male, american)", "Daniel (male, british)", "Lily (female, british)", "Adam (male, american)", "Bill (male, american)"]}]}}, "input_order": {"required": ["voice"]}, "is_input_list": false, "output": ["ELEVENLABS_VOICE"], "output_is_list": [false], "output_name": ["voice"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsVoiceSelector", "display_name": "ElevenLabs Voice Selector", "description": "Select a predefined ElevenLabs voice for text-to-speech generation.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsTextToSpeech": {"input": {"required": {"voice": ["ELEVENLABS_VOICE", {"tooltip": "Voice to use for speech synthesis. Connect from Voice Selector or Instant Voice Clone."}], "text": ["STRING", {"tooltip": "The text to convert to speech.", "default": "", "multiline": true}], "stability": ["FLOAT", {"tooltip": "Voice stability. Lower values give broader emotional range, higher values produce more consistent but potentially monotonous speech.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "apply_text_normalization": ["COMBO", {"tooltip": "Text normalization mode. 'auto' lets the system decide, 'on' always applies normalization, 'off' skips it.", "multiselect": false, "options": ["auto", "on", "off"]}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for text-to-speech.", "options": [{"key": "eleven_multilingual_v2", "inputs": {"required": {"speed": ["FLOAT", {"tooltip": "Speech speed. 
1.0 is normal, <1.0 slower, >1.0 faster.", "default": 1.0, "min": 0.7, "max": 1.3, "step": 0.01, "display": "slider"}], "similarity_boost": ["FLOAT", {"tooltip": "Similarity boost. Higher values make the voice more similar to the original.", "default": 0.75, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "use_speaker_boost": ["BOOLEAN", {"tooltip": "Boost similarity to the original speaker voice.", "default": false}], "style": ["FLOAT", {"tooltip": "Style exaggeration. Higher values increase stylistic expression but may reduce stability.", "default": 0.0, "min": 0.0, "max": 0.2, "step": 0.01, "display": "slider"}]}}}, {"key": "eleven_v3", "inputs": {"required": {"speed": ["FLOAT", {"tooltip": "Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", "default": 1.0, "min": 0.7, "max": 1.3, "step": 0.01, "display": "slider"}], "similarity_boost": ["FLOAT", {"tooltip": "Similarity boost. Higher values make the voice more similar to the original.", "default": 0.75, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}]}}}]}], "language_code": ["STRING", {"tooltip": "ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). 
Leave empty for automatic detection.", "default": "", "multiline": false}], "seed": ["INT", {"tooltip": "Seed for reproducibility (determinism not guaranteed).", "default": 1, "min": 0, "max": 2147483647}], "output_format": ["COMBO", {"tooltip": "Audio output format.", "multiselect": false, "options": ["mp3_44100_192", "opus_48000_192"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["voice", "text", "stability", "apply_text_normalization", "model", "language_code", "seed", "output_format"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsTextToSpeech", "display_name": "ElevenLabs Text to Speech", "description": "Convert text to speech.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.24,\"format\":{\"approximate\":true,\"suffix\":\"/1K chars\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsAudioIsolation": {"input": {"required": {"audio": ["AUDIO", {"tooltip": "Audio to process for background noise removal."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": 
"ElevenLabsAudioIsolation", "display_name": "ElevenLabs Voice Isolation", "description": "Remove background noise from audio, isolating vocals or speech.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.24,\"format\":{\"approximate\":true,\"suffix\":\"/minute\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsTextToSoundEffects": {"input": {"required": {"text": ["STRING", {"tooltip": "Text description of the sound effect to generate.", "default": "", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for sound effect generation.", "options": [{"key": "eleven_sfx_v2", "inputs": {"required": {"duration": ["FLOAT", {"tooltip": "Duration of generated sound in seconds.", "default": 5.0, "min": 0.5, "max": 30.0, "step": 0.1, "display": "slider"}], "loop": ["BOOLEAN", {"tooltip": "Create a smoothly looping sound effect.", "default": false}], "prompt_influence": ["FLOAT", {"tooltip": "How closely generation follows the prompt. 
Higher values make the sound follow the text more closely.", "default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}]}}}]}], "output_format": ["COMBO", {"tooltip": "Audio output format.", "multiselect": false, "options": ["mp3_44100_192", "opus_48000_192"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["text", "model", "output_format"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsTextToSoundEffects", "display_name": "ElevenLabs Text to Sound Effects", "description": "Generate sound effects from text descriptions.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.14,\"format\":{\"approximate\":true,\"suffix\":\"/minute\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsInstantVoiceClone": {"input": {"required": {"files": ["COMFY_AUTOGROW_V3", {"tooltip": "Audio recordings for voice cloning.", "template": {"input": {"required": {"audio": ["AUDIO", {}]}}, "prefix": "audio", "min": 1, "max": 8}}], "remove_background_noise": ["BOOLEAN", {"tooltip": "Remove background noise from voice samples using audio isolation.", "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["files", "remove_background_noise"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", 
"unique_id"]}, "is_input_list": false, "output": ["ELEVENLABS_VOICE"], "output_is_list": [false], "output_name": ["voice"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsInstantVoiceClone", "display_name": "ElevenLabs Instant Voice Clone", "description": "Create a cloned voice from audio samples. Provide 1-8 audio recordings of the voice to clone.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.15}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsSpeechToSpeech": {"input": {"required": {"voice": ["ELEVENLABS_VOICE", {"tooltip": "Target voice for the transformation. Connect from Voice Selector or Instant Voice Clone."}], "audio": ["AUDIO", {"tooltip": "Source audio to transform."}], "stability": ["FLOAT", {"tooltip": "Voice stability. Lower values give broader emotional range, higher values produce more consistent but potentially monotonous speech.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for speech-to-speech transformation.", "options": [{"key": "eleven_multilingual_sts_v2", "inputs": {"required": {"speed": ["FLOAT", {"tooltip": "Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", "default": 1.0, "min": 0.7, "max": 1.3, "step": 0.01, "display": "slider"}], "similarity_boost": ["FLOAT", {"tooltip": "Similarity boost. 
Higher values make the voice more similar to the original.", "default": 0.75, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "use_speaker_boost": ["BOOLEAN", {"tooltip": "Boost similarity to the original speaker voice.", "default": false}], "style": ["FLOAT", {"tooltip": "Style exaggeration. Higher values increase stylistic expression but may reduce stability.", "default": 0.0, "min": 0.0, "max": 0.2, "step": 0.01, "display": "slider"}]}}}, {"key": "eleven_english_sts_v2", "inputs": {"required": {"speed": ["FLOAT", {"tooltip": "Speech speed. 1.0 is normal, <1.0 slower, >1.0 faster.", "default": 1.0, "min": 0.7, "max": 1.3, "step": 0.01, "display": "slider"}], "similarity_boost": ["FLOAT", {"tooltip": "Similarity boost. Higher values make the voice more similar to the original.", "default": 0.75, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "use_speaker_boost": ["BOOLEAN", {"tooltip": "Boost similarity to the original speaker voice.", "default": false}], "style": ["FLOAT", {"tooltip": "Style exaggeration. 
Higher values increase stylistic expression but may reduce stability.", "default": 0.0, "min": 0.0, "max": 0.2, "step": 0.01, "display": "slider"}]}}}]}], "output_format": ["COMBO", {"tooltip": "Audio output format.", "multiselect": false, "options": ["mp3_44100_192", "opus_48000_192"]}], "seed": ["INT", {"tooltip": "Seed for reproducibility.", "default": 0, "min": 0, "max": 4294967295}], "remove_background_noise": ["BOOLEAN", {"tooltip": "Remove background noise from input audio using audio isolation.", "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["voice", "audio", "stability", "model", "output_format", "seed", "remove_background_noise"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsSpeechToSpeech", "display_name": "ElevenLabs Speech to Speech", "description": "Transform speech from one voice to another while preserving the original content and emotion.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.24,\"format\":{\"approximate\":true,\"suffix\":\"/minute\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ElevenLabsTextToDialogue": {"input": {"required": {"stability": ["FLOAT", {"tooltip": "Voice stability. 
Lower values give broader emotional range, higher values produce more consistent but potentially monotonous speech.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.5, "display": "slider"}], "apply_text_normalization": ["COMBO", {"tooltip": "Text normalization mode. 'auto' lets the system decide, 'on' always applies normalization, 'off' skips it.", "multiselect": false, "options": ["auto", "on", "off"]}], "model": ["COMBO", {"tooltip": "Model to use for dialogue generation.", "multiselect": false, "options": ["eleven_v3"]}], "inputs": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Number of dialogue entries.", "options": [{"key": "1", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "2", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "3", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. 
Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "4", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "5", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. 
Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "6", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}], "text6": ["STRING", {"tooltip": "Text content for dialogue entry 6.", "default": "", "multiline": true}], "voice6": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 6. 
Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "7", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}], "text6": ["STRING", {"tooltip": "Text content for dialogue entry 6.", "default": "", "multiline": true}], "voice6": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 6. Connect from Voice Selector or Instant Voice Clone."}], "text7": ["STRING", {"tooltip": "Text content for dialogue entry 7.", "default": "", "multiline": true}], "voice7": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 7. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "8", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. 
Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}], "text6": ["STRING", {"tooltip": "Text content for dialogue entry 6.", "default": "", "multiline": true}], "voice6": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 6. Connect from Voice Selector or Instant Voice Clone."}], "text7": ["STRING", {"tooltip": "Text content for dialogue entry 7.", "default": "", "multiline": true}], "voice7": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 7. Connect from Voice Selector or Instant Voice Clone."}], "text8": ["STRING", {"tooltip": "Text content for dialogue entry 8.", "default": "", "multiline": true}], "voice8": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 8. Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "9", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. 
Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}], "text6": ["STRING", {"tooltip": "Text content for dialogue entry 6.", "default": "", "multiline": true}], "voice6": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 6. Connect from Voice Selector or Instant Voice Clone."}], "text7": ["STRING", {"tooltip": "Text content for dialogue entry 7.", "default": "", "multiline": true}], "voice7": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 7. Connect from Voice Selector or Instant Voice Clone."}], "text8": ["STRING", {"tooltip": "Text content for dialogue entry 8.", "default": "", "multiline": true}], "voice8": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 8. Connect from Voice Selector or Instant Voice Clone."}], "text9": ["STRING", {"tooltip": "Text content for dialogue entry 9.", "default": "", "multiline": true}], "voice9": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 9. 
Connect from Voice Selector or Instant Voice Clone."}]}}}, {"key": "10", "inputs": {"required": {"text1": ["STRING", {"tooltip": "Text content for dialogue entry 1.", "default": "", "multiline": true}], "voice1": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 1. Connect from Voice Selector or Instant Voice Clone."}], "text2": ["STRING", {"tooltip": "Text content for dialogue entry 2.", "default": "", "multiline": true}], "voice2": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 2. Connect from Voice Selector or Instant Voice Clone."}], "text3": ["STRING", {"tooltip": "Text content for dialogue entry 3.", "default": "", "multiline": true}], "voice3": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 3. Connect from Voice Selector or Instant Voice Clone."}], "text4": ["STRING", {"tooltip": "Text content for dialogue entry 4.", "default": "", "multiline": true}], "voice4": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 4. Connect from Voice Selector or Instant Voice Clone."}], "text5": ["STRING", {"tooltip": "Text content for dialogue entry 5.", "default": "", "multiline": true}], "voice5": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 5. Connect from Voice Selector or Instant Voice Clone."}], "text6": ["STRING", {"tooltip": "Text content for dialogue entry 6.", "default": "", "multiline": true}], "voice6": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 6. Connect from Voice Selector or Instant Voice Clone."}], "text7": ["STRING", {"tooltip": "Text content for dialogue entry 7.", "default": "", "multiline": true}], "voice7": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 7. Connect from Voice Selector or Instant Voice Clone."}], "text8": ["STRING", {"tooltip": "Text content for dialogue entry 8.", "default": "", "multiline": true}], "voice8": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 8. 
Connect from Voice Selector or Instant Voice Clone."}], "text9": ["STRING", {"tooltip": "Text content for dialogue entry 9.", "default": "", "multiline": true}], "voice9": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 9. Connect from Voice Selector or Instant Voice Clone."}], "text10": ["STRING", {"tooltip": "Text content for dialogue entry 10.", "default": "", "multiline": true}], "voice10": ["ELEVENLABS_VOICE", {"tooltip": "Voice for dialogue entry 10. Connect from Voice Selector or Instant Voice Clone."}]}}}]}], "language_code": ["STRING", {"tooltip": "ISO-639-1 or ISO-639-3 language code (e.g., 'en', 'es', 'fra'). Leave empty for automatic detection.", "default": "", "multiline": false}], "seed": ["INT", {"tooltip": "Seed for reproducibility.", "default": 1, "min": 0, "max": 4294967295}], "output_format": ["COMBO", {"tooltip": "Audio output format.", "multiselect": false, "options": ["mp3_44100_192", "opus_48000_192"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["stability", "apply_text_normalization", "model", "inputs", "language_code", "seed", "output_format"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ElevenLabsTextToDialogue", "display_name": "ElevenLabs Text to Dialogue", "description": "Generate multi-speaker dialogue from text. 
Each dialogue entry has its own text and voice.", "python_module": "comfy_api_nodes.nodes_elevenlabs", "category": "api node/audio/ElevenLabs", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.24,\"format\":{\"approximate\":true,\"suffix\":\"/1K chars\"}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GeminiNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text inputs to the model, used to generate a response. You can include detailed instructions, questions, or context for the model.", "default": "", "multiline": true}], "model": ["COMBO", {"tooltip": "The Gemini model to use for generating responses.", "default": "gemini-3-1-pro", "multiselect": false, "options": ["gemini-2.5-pro-preview-05-06", "gemini-2.5-flash-preview-04-17", "gemini-2.5-pro", "gemini-2.5-flash", "gemini-3-pro-preview", "gemini-3-1-pro", "gemini-3-1-flash-lite"]}], "seed": ["INT", {"tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", "default": 42, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"images": ["IMAGE", {"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."}], "audio": ["AUDIO", {"tooltip": "Optional audio to use as context for the model."}], "video": ["VIDEO", {"tooltip": "Optional video to use as context for the model."}], "files": ["GEMINI_INPUT_FILES", {"tooltip": "Optional file(s) to use as context for the model. 
Accepts inputs from the Gemini Generate Content Input Files node."}], "system_prompt": ["STRING", {"tooltip": "Foundational instructions that dictate an AI's behavior.", "advanced": true, "default": "", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed"], "optional": ["images", "audio", "video", "files", "system_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "GeminiNode", "display_name": "Google Gemini", "description": "Generate text responses with Google's Gemini AI model. You can provide multiple types of inputs (text, images, audio, video) as context for generating more relevant and meaningful responses.", "python_module": "comfy_api_nodes.nodes_gemini", "category": "api node/text/Gemini", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $contains($m, \"gemini-2.5-flash\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.0003, 0.0025],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\"}\n }\n : $contains($m, \"gemini-2.5-pro\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.00125, 0.01],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : ($contains($m, \"gemini-3-pro-preview\") or $contains($m, \"gemini-3-1-pro\")) ? 
{\n \"type\": \"list_usd\",\n \"usd\": [0.002, 0.012],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gemini-3-1-flash-lite\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.00025, 0.0015],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : {\"type\":\"text\", \"text\":\"Token-based\"}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GeminiImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for generation", "default": "", "multiline": true}], "model": ["COMBO", {"tooltip": "The Gemini model to use for generating responses.", "default": "gemini-2.5-flash-image", "multiselect": false, "options": ["gemini-2.5-flash-image-preview", "gemini-2.5-flash-image"]}], "seed": ["INT", {"tooltip": "When seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", "default": 42, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"images": ["IMAGE", {"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."}], "files": ["GEMINI_INPUT_FILES", {"tooltip": "Optional file(s) to use as context for the model. 
Accepts inputs from the Gemini Generate Content Input Files node."}], "aspect_ratio": ["COMBO", {"tooltip": "Defaults to matching the output image size to that of your input image, or otherwise generates 1:1 squares.", "default": "auto", "multiselect": false, "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]}], "response_modalities": ["COMBO", {"tooltip": "Choose 'IMAGE' for image-only output, or 'IMAGE+TEXT' to return both the generated image and a text response.", "advanced": true, "multiselect": false, "options": ["IMAGE+TEXT", "IMAGE"]}], "system_prompt": ["STRING", {"tooltip": "Foundational instructions that dictate an AI's behavior.", "advanced": true, "default": "You are an expert image-generation engine. You must ALWAYS produce an image.\nInterpret all user input\u2014regardless of format, intent, or abstraction\u2014as literal visual directives for image composition.\nIf a prompt is conversational or lacks specific visual details, you must creatively invent a concrete visual scenario that depicts the concept.\nPrioritize generating the visual representation above any text, formatting, or conversational requests.", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed"], "optional": ["images", "files", "aspect_ratio", "response_modalities", "system_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "STRING"], "output_is_list": [false, false], "output_name": ["IMAGE", "STRING"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "GeminiImageNode", "display_name": "Nano Banana (Google Gemini Image)", "description": "Edit images synchronously via Google API.", "python_module": "comfy_api_nodes.nodes_gemini", "category": "api node/image/Gemini", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.039,\"format\":{\"suffix\":\"/Image (1K)\",\"approximate\":true}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GeminiImage2Node": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt describing the image to generate or the edits to apply. Include any constraints, styles, or details the model should follow.", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["gemini-3-pro-image-preview", "Nano Banana 2 (Gemini 3.1 Flash Image)"]}], "seed": ["INT", {"tooltip": "When the seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. By default, a random seed value is used.", "default": 42, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "aspect_ratio": ["COMBO", {"tooltip": "If set to 'auto', matches your input image's aspect ratio; if no image is provided, a 16:9 image is usually generated.", "default": "auto", "multiselect": false, "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]}], "resolution": ["COMBO", {"tooltip": "Target output resolution. 
For 2K/4K the native Gemini upscaler is used.", "multiselect": false, "options": ["1K", "2K", "4K"]}], "response_modalities": ["COMBO", {"tooltip": "Choose 'IMAGE' for image-only output, or 'IMAGE+TEXT' to return both the generated image and a text response.", "advanced": true, "multiselect": false, "options": ["IMAGE+TEXT", "IMAGE"]}]}, "optional": {"images": ["IMAGE", {"tooltip": "Optional reference image(s). To include multiple images, use the Batch Images node (up to 14)."}], "files": ["GEMINI_INPUT_FILES", {"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."}], "system_prompt": ["STRING", {"tooltip": "Foundational instructions that dictate an AI's behavior.", "advanced": true, "default": "You are an expert image-generation engine. You must ALWAYS produce an image.\nInterpret all user input\u2014regardless of format, intent, or abstraction\u2014as literal visual directives for image composition.\nIf a prompt is conversational or lacks specific visual details, you must creatively invent a concrete visual scenario that depicts the concept.\nPrioritize generating the visual representation above any text, formatting, or conversational requests.", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed", "aspect_ratio", "resolution", "response_modalities"], "optional": ["images", "files", "system_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "STRING"], "output_is_list": [false, false], "output_name": ["IMAGE", "STRING"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "GeminiImage2Node", "display_name": "Nano Banana Pro (Google Gemini Image)", "description": "Generate or edit images synchronously via Google Vertex API.", "python_module": 
"comfy_api_nodes.nodes_gemini", "category": "api node/image/Gemini", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $r := widgets.resolution;\n $isFlash := $contains($lowercase($m), \"nano banana 2\");\n $flashPrices := {\"1k\": 0.0696, \"2k\": 0.1014, \"4k\": 0.154};\n $proPrices := {\"1k\": 0.134, \"2k\": 0.134, \"4k\": 0.24};\n $prices := $isFlash ? $flashPrices : $proPrices;\n {\"type\":\"usd\",\"usd\": $lookup($prices, $lowercase($r)), \"format\":{\"suffix\":\"/Image\",\"approximate\":true}}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GeminiNanoBanana2": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt describing the image to generate or the edits to apply. Include any constraints, styles, or details the model should follow.", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["Nano Banana 2 (Gemini 3.1 Flash Image)"]}], "seed": ["INT", {"tooltip": "When the seed is fixed to a specific value, the model makes a best effort to provide the same response for repeated requests. Deterministic output isn't guaranteed. Also, changing the model or parameter settings, such as the temperature, can cause variations in the response even when you use the same seed value. 
By default, a random seed value is used.", "default": 42, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "aspect_ratio": ["COMBO", {"tooltip": "If set to 'auto', matches your input image's aspect ratio; if no image is provided, a 16:9 image is usually generated.", "default": "auto", "multiselect": false, "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]}], "resolution": ["COMBO", {"tooltip": "Target output resolution. For 2K/4K the native Gemini upscaler is used.", "multiselect": false, "options": ["1K", "2K", "4K"]}], "response_modalities": ["COMBO", {"advanced": true, "multiselect": false, "options": ["IMAGE", "IMAGE+TEXT"]}], "thinking_level": ["COMBO", {"multiselect": false, "options": ["MINIMAL", "HIGH"]}]}, "optional": {"images": ["IMAGE", {"tooltip": "Optional reference image(s). To include multiple images, use the Batch Images node (up to 14)."}], "files": ["GEMINI_INPUT_FILES", {"tooltip": "Optional file(s) to use as context for the model. Accepts inputs from the Gemini Generate Content Input Files node."}], "system_prompt": ["STRING", {"tooltip": "Foundational instructions that dictate an AI's behavior.", "advanced": true, "default": "You are an expert image-generation engine. 
You must ALWAYS produce an image.\nInterpret all user input\u2014regardless of format, intent, or abstraction\u2014as literal visual directives for image composition.\nIf a prompt is conversational or lacks specific visual details, you must creatively invent a concrete visual scenario that depicts the concept.\nPrioritize generating the visual representation above any text, formatting, or conversational requests.", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed", "aspect_ratio", "resolution", "response_modalities", "thinking_level"], "optional": ["images", "files", "system_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "STRING", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "STRING", "thought_image"], "output_tooltips": [null, null, "First image from the model's thinking process. Only available with thinking_level HIGH and IMAGE+TEXT modality."], "output_matchtypes": null, "name": "GeminiNanoBanana2", "display_name": "Nano Banana 2", "description": "Generate or edit images synchronously via Google Vertex API.", "python_module": "comfy_api_nodes.nodes_gemini", "category": "api node/image/Gemini", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $r := widgets.resolution;\n $isFlash := $contains($m, \"nano banana 2\");\n $flashPrices := {\"1k\": 0.0696, \"2k\": 0.1014, \"4k\": 0.154};\n $proPrices := {\"1k\": 0.134, \"2k\": 0.134, \"4k\": 0.24};\n $prices := $isFlash ? 
$flashPrices : $proPrices;\n {\"type\":\"usd\",\"usd\": $lookup($prices, $r), \"format\":{\"suffix\":\"/Image\",\"approximate\":true}}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GeminiInputFiles": {"input": {"required": {"file": ["COMBO", {"tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", "multiselect": false, "options": []}]}, "optional": {"GEMINI_INPUT_FILES": ["GEMINI_INPUT_FILES", {"tooltip": "An optional additional file(s) to batch together with the file loaded from this node. Allows chaining of input files so that a single message can include multiple input files."}]}}, "input_order": {"required": ["file"], "optional": ["GEMINI_INPUT_FILES"]}, "is_input_list": false, "output": ["GEMINI_INPUT_FILES"], "output_is_list": [false], "output_name": ["GEMINI_INPUT_FILES"], "output_tooltips": [null], "output_matchtypes": null, "name": "GeminiInputFiles", "display_name": "Gemini Input Files", "description": "Loads and prepares input files to include as inputs for Gemini LLM nodes. The files will be read by the Gemini model when generating a response. The contents of the text file count toward the token limit. 
\ud83d\udec8 TIP: Can be chained together with other Gemini Input File nodes.", "python_module": "comfy_api_nodes.nodes_gemini", "category": "api node/text/Gemini", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokImageNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"]}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the image", "multiline": true}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["1:1", "2:3", "3:2", "3:4", "4:3", "9:16", "16:9", "9:19.5", "19.5:9", "9:20", "20:9", "1:2", "2:1"]}], "number_of_images": ["INT", {"tooltip": "Number of images to generate", "default": 1, "min": 1, "max": 10, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"resolution": ["COMBO", {"multiselect": false, "options": ["1K", "2K"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "aspect_ratio", "number_of_images", "seed"], "optional": ["resolution"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrokImageNode", "display_name": "Grok Image", "description": "Generate images using Grok based on a text prompt", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/image/Grok", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "number_of_images", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $rate := $contains(widgets.model, \"pro\") ? 0.07 : 0.02;\n {\"type\":\"usd\",\"usd\": $rate * widgets.number_of_images}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokImageEditNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"]}], "image": ["IMAGE", {"display_name": "images"}], "prompt": ["STRING", {"tooltip": "The text prompt used to generate the image", "multiline": true}], "resolution": ["COMBO", {"multiselect": false, "options": ["1K", "2K"]}], "number_of_images": ["INT", {"tooltip": "Number of edited images to generate", "default": 1, "min": 1, "max": 10, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"aspect_ratio": ["COMBO", {"tooltip": "Only allowed when multiple images are connected to the image input.", "multiselect": false, "options": ["auto", "1:1", "2:3", "3:2", "3:4", "4:3", "9:16", "16:9", "9:19.5", "19.5:9", "9:20", "20:9", "1:2", "2:1"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt", "resolution", "number_of_images", "seed"], "optional": ["aspect_ratio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], 
"output_tooltips": [null], "output_matchtypes": null, "name": "GrokImageEditNode", "display_name": "Grok Image Edit", "description": "Modify an existing image based on a text prompt", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/image/Grok", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "number_of_images", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $rate := $contains(widgets.model, \"pro\") ? 0.07 : 0.02;\n {\"type\":\"usd\",\"usd\": 0.002 + $rate * widgets.number_of_images}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["grok-imagine-video", "grok-imagine-video-beta"]}], "prompt": ["STRING", {"tooltip": "Text description of the desired video.", "multiline": true}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["auto", "16:9", "4:3", "3:2", "1:1", "2:3", "3:4", "9:16"]}], "duration": ["INT", {"tooltip": "The duration of the output video in seconds.", "default": 6, "min": 1, "max": 15, "step": 1, "display": "slider"}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "resolution", "aspect_ratio", "duration", "seed"], 
"optional": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrokVideoNode", "display_name": "Grok Video", "description": "Generate video from a prompt or an image", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/video/Grok", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": ["image"], "input_groups": []}, "expr": "\n (\n $rate := widgets.resolution = \"720p\" ? 0.07 : 0.05;\n $base := $rate * widgets.duration;\n {\"type\":\"usd\",\"usd\": inputs.image.connected ? $base + 0.002 : $base}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokVideoReferenceNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the desired video.", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "The model to use for video generation.", "options": [{"key": "grok-imagine-video", "inputs": {"required": {"reference_images": ["COMFY_AUTOGROW_V3", {"tooltip": "Up to 7 reference images to guide the video generation.", "template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "reference_", "min": 1, "max": 7}}], "resolution": ["COMBO", {"tooltip": "The resolution of the output video.", "multiselect": false, "options": ["480p", "720p"]}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["16:9", "4:3", "3:2", "1:1", "2:3", "3:4", "9:16"]}], "duration": ["INT", {"tooltip": "The duration of the output video in seconds.", "default": 6, "min": 2, "max": 10, "step": 1, "display": "slider"}]}}}]}], "seed": 
["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrokVideoReferenceNode", "display_name": "Grok Reference-to-Video", "description": "Generate video guided by reference images as style and content references.", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/video/Grok", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model.duration", "type": "INT"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": ["model.reference_images"]}, "expr": "\n (\n $res := $lookup(widgets, \"model.resolution\");\n $dur := $lookup(widgets, \"model.duration\");\n $refs := inputGroups[\"model.reference_images\"];\n $rate := $res = \"720p\" ? 
0.07 : 0.05;\n $price := ($rate * $dur + 0.002 * $refs) * 1.43;\n {\"type\":\"usd\",\"usd\": $price}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokVideoEditNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["grok-imagine-video", "grok-imagine-video-beta"]}], "prompt": ["STRING", {"tooltip": "Text description of the desired video.", "multiline": true}], "video": ["VIDEO", {"tooltip": "Maximum supported duration is 8.7 seconds and 50MB file size."}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "video", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrokVideoEditNode", "display_name": "Grok Video Edit", "description": "Edit an existing video based on a text prompt.", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/video/Grok", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.06, \"format\": {\"suffix\": \"/sec\", \"approximate\": true}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "GrokVideoExtendNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of what should happen next in the video.", "multiline": true}], "video": ["VIDEO", 
{"tooltip": "Source video to extend. MP4 format, 2-15 seconds."}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "The model to use for video extension.", "options": [{"key": "grok-imagine-video", "inputs": {"required": {"duration": ["INT", {"tooltip": "Length of the extension in seconds.", "default": 8, "min": 2, "max": 10, "step": 1, "display": "slider"}]}}}]}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "video", "model", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "GrokVideoExtendNode", "display_name": "Grok Video Extend", "description": "Extend an existing video with a seamless continuation based on a text prompt.", "python_module": "comfy_api_nodes.nodes_grok", "category": "api node/video/Grok", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model.duration", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $dur := $lookup(widgets, \"model.duration\");\n {\n \"type\": \"range_usd\",\n \"min_usd\": (0.02 + 0.05 * $dur) * 1.43,\n \"max_usd\": (0.15 + 0.05 * $dur) * 1.43\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HitPawGeneralImageEnhance": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["generative_portrait", "generative"]}], "image": ["IMAGE", {}], "upscale_factor": 
["COMBO", {"multiselect": false, "options": [1, 2, 4]}], "auto_downscale": ["BOOLEAN", {"tooltip": "Automatically downscale input image if output would exceed the limit.", "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "upscale_factor", "auto_downscale"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "HitPawGeneralImageEnhance", "display_name": "HitPaw General Image Enhance", "description": "Upscale low-resolution images to super-resolution, eliminate artifacts and noise. Maximum output: 32 megapixels.", "python_module": "comfy_api_nodes.nodes_hitpaw", "category": "api node/image/HitPaw", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"generative_portrait\": {\"min\": 0.02, \"max\": 0.06},\n \"generative\": {\"min\": 0.05, \"max\": 0.15}\n };\n $price := $lookup($prices, widgets.model);\n {\n \"type\": \"range_usd\",\n \"min_usd\": $price.min,\n \"max_usd\": $price.max\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "HitPawVideoEnhance": {"input": {"required": {"model": ["COMFY_DYNAMICCOMBO_V3", {"options": [{"key": "Portrait Restore Model (1x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]}]}}}, {"key": "Portrait Restore Model (2x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", 
"8K"]}]}}}, {"key": "General Restore Model (1x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]}]}}}, {"key": "General Restore Model (2x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]}]}}}, {"key": "General Restore Model (4x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]}]}}}, {"key": "Ultra HD Model (2x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD", "8K"]}]}}}, {"key": "Generative Model (1x)", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["original", "720p", "1080p", "2K/QHD", "4K/UHD"]}]}}}]}], "video": ["VIDEO", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "video"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "HitPawVideoEnhance", "display_name": "HitPaw Video Enhance", "description": "Upscale low-resolution videos to high resolution, eliminate artifacts and noise. 
Prices shown are per second of video.", "python_module": "comfy_api_nodes.nodes_hitpaw", "category": "api node/video/HitPaw", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := $lookup(widgets, \"model\");\n $res := $lookup(widgets, \"model.resolution\");\n $standard_model_prices := {\n \"original\": {\"min\": 0.01, \"max\": 0.198},\n \"720p\": {\"min\": 0.01, \"max\": 0.06},\n \"1080p\": {\"min\": 0.015, \"max\": 0.09},\n \"2k/qhd\": {\"min\": 0.02, \"max\": 0.117},\n \"4k/uhd\": {\"min\": 0.025, \"max\": 0.152},\n \"8k\": {\"min\": 0.033, \"max\": 0.198}\n };\n $ultra_hd_model_prices := {\n \"original\": {\"min\": 0.015, \"max\": 0.264},\n \"720p\": {\"min\": 0.015, \"max\": 0.092},\n \"1080p\": {\"min\": 0.02, \"max\": 0.12},\n \"2k/qhd\": {\"min\": 0.026, \"max\": 0.156},\n \"4k/uhd\": {\"min\": 0.034, \"max\": 0.203},\n \"8k\": {\"min\": 0.044, \"max\": 0.264}\n };\n $generative_model_prices := {\n \"original\": {\"min\": 0.015, \"max\": 0.338},\n \"720p\": {\"min\": 0.008, \"max\": 0.090},\n \"1080p\": {\"min\": 0.05, \"max\": 0.15},\n \"2k/qhd\": {\"min\": 0.038, \"max\": 0.225},\n \"4k/uhd\": {\"min\": 0.056, \"max\": 0.338}\n };\n $prices := $contains($m, \"ultra hd\") ? $ultra_hd_model_prices :\n $contains($m, \"generative\") ? 
$generative_model_prices :\n $standard_model_prices;\n $price := $lookup($prices, $res);\n {\n \"type\": \"range_usd\",\n \"min_usd\": $price.min,\n \"max_usd\": $price.max,\n \"format\": {\"approximate\": true, \"suffix\": \"/second\"}\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TencentTextToModelNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "The LowPoly option is unavailable for the `3.1` model.", "multiselect": false, "options": ["3.0", "3.1"]}], "prompt": ["STRING", {"tooltip": "Supports up to 1024 characters.", "default": "", "multiline": true}], "face_count": ["INT", {"default": 500000, "min": 3000, "max": 1500000}], "generate_type": ["COMFY_DYNAMICCOMBO_V3", {"options": [{"key": "Normal", "inputs": {"required": {"pbr": ["BOOLEAN", {"default": false}]}}}, {"key": "LowPoly", "inputs": {"required": {"polygon_type": ["COMBO", {"multiselect": false, "options": ["triangle", "quadrilateral"]}], "pbr": ["BOOLEAN", {"default": false}]}}}, {"key": "Geometry", "inputs": {"required": {}}}]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "prompt", "face_count", "generate_type", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB", "FILE_3D_OBJ", "IMAGE"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "GLB", "OBJ", "texture_image"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "TencentTextToModelNode", "display_name": 
"Hunyuan3D: Text to Model", "description": "", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "generate_type", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "generate_type.pbr", "type": "BOOLEAN"}, {"name": "face_count", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $base := widgets.generate_type = \"normal\" ? 25 : widgets.generate_type = \"lowpoly\" ? 30 : 15;\n $pbr := $lookup(widgets, \"generate_type.pbr\") ? 10 : 0;\n $face := widgets.face_count != 500000 ? 10 : 0;\n {\"type\":\"usd\",\"usd\": ($base + $pbr + $face) * 0.02}\n )\n "}, "search_aliases": null, "essentials_category": "3D", "has_intermediate_output": false}, "TencentImageToModelNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "The LowPoly option is unavailable for the `3.1` model.", "multiselect": false, "options": ["3.0", "3.1"]}], "image": ["IMAGE", {}], "face_count": ["INT", {"default": 500000, "min": 3000, "max": 1500000}], "generate_type": ["COMFY_DYNAMICCOMBO_V3", {"options": [{"key": "Normal", "inputs": {"required": {"pbr": ["BOOLEAN", {"default": false}]}}}, {"key": "LowPoly", "inputs": {"required": {"polygon_type": ["COMBO", {"multiselect": false, "options": ["triangle", "quadrilateral"]}], "pbr": ["BOOLEAN", {"default": false}]}}}, {"key": "Geometry", "inputs": {"required": {}}}]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "optional": {"image_left": ["IMAGE", {}], "image_right": ["IMAGE", {}], "image_back": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": 
["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "image", "face_count", "generate_type", "seed"], "optional": ["image_left", "image_right", "image_back"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB", "FILE_3D_OBJ", "IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["model_file", "GLB", "OBJ", "texture_image", "optional_metallic", "optional_normal", "optional_roughness"], "output_tooltips": [null, null, null, null, null, null, null], "output_matchtypes": null, "name": "TencentImageToModelNode", "display_name": "Hunyuan3D: Image(s) to Model", "description": "", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "generate_type", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "generate_type.pbr", "type": "BOOLEAN"}, {"name": "face_count", "type": "INT"}], "inputs": ["image_left", "image_right", "image_back"], "input_groups": []}, "expr": "\n (\n $base := widgets.generate_type = \"normal\" ? 25 : widgets.generate_type = \"lowpoly\" ? 30 : 15;\n $multiview := (\n inputs.image_left.connected or inputs.image_right.connected or inputs.image_back.connected\n ) ? 10 : 0;\n $pbr := $lookup(widgets, \"generate_type.pbr\") ? 10 : 0;\n $face := widgets.face_count != 500000 ? 
10 : 0;\n {\"type\":\"usd\",\"usd\": ($base + $multiview + $pbr + $face) * 0.02}\n )\n "}, "search_aliases": null, "essentials_category": "3D", "has_intermediate_output": false}, "TencentModelTo3DUVNode": {"input": {"required": {"model_3d": ["FILE_3D_GLB,FILE_3D_OBJ,FILE_3D_FBX,FILE_3D", {"tooltip": "Input 3D model (GLB, OBJ, or FBX)"}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 1, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_3d", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["FILE_3D_OBJ", "FILE_3D_FBX", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["OBJ", "FBX", "uv_image"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TencentModelTo3DUVNode", "display_name": "Hunyuan3D: Model to UV", "description": "Perform UV unfolding on a 3D model to generate UV texture. Input model must have less than 30000 faces.", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.2}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Tencent3DTextureEditNode": {"input": {"required": {"model_3d": ["FILE_3D_FBX,FILE_3D", {"tooltip": "3D model in FBX format. Model should have less than 100000 faces."}], "prompt": ["STRING", {"tooltip": "Describes texture editing. 
Supports up to 1024 UTF-8 characters.", "default": "", "multiline": true}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_3d", "prompt", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["FILE_3D_GLB", "FILE_3D_OBJ", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["GLB", "OBJ", "texture_image"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "Tencent3DTextureEditNode", "display_name": "Hunyuan3D: 3D Texture Edit", "description": "After inputting the 3D model, perform 3D model texture redrawing.", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.6}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Tencent3DPartNode": {"input": {"required": {"model_3d": ["FILE_3D_FBX,FILE_3D", {"tooltip": "3D model in FBX format. 
Model should have less than 30000 faces."}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_3d", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["FILE_3D_FBX"], "output_is_list": [false], "output_name": ["FBX"], "output_tooltips": [null], "output_matchtypes": null, "name": "Tencent3DPartNode", "display_name": "Hunyuan3D: 3D Part", "description": "Automatically perform component identification and generation based on the model structure.", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.6}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TencentSmartTopologyNode": {"input": {"required": {"model_3d": ["FILE_3D_GLB,FILE_3D_OBJ,FILE_3D", {"tooltip": "Input 3D model (GLB or OBJ)"}], "polygon_type": ["COMBO", {"tooltip": "Surface composition type.", "multiselect": false, "options": ["triangle", "quadrilateral"]}], "face_level": ["COMBO", {"tooltip": "Polygon reduction level.", "multiselect": false, "options": ["medium", "high", "low"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": 
["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_3d", "polygon_type", "face_level", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["FILE_3D_OBJ"], "output_is_list": [false], "output_name": ["OBJ"], "output_tooltips": [null], "output_matchtypes": null, "name": "TencentSmartTopologyNode", "display_name": "Hunyuan3D: Smart Topology", "description": "Perform smart retopology on a 3D model. Supports GLB/OBJ formats; max 200MB; recommended for high-poly models.", "python_module": "comfy_api_nodes.nodes_hunyuan3d", "category": "api node/3d/Tencent", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":1.0}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "IdeogramV1": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "turbo": ["BOOLEAN", {"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", "default": false}]}, "optional": {"aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio for image generation.", "default": "1:1", "multiselect": false, "options": ["1:1", "4:3", "3:4", "16:9", "9:16", "2:1", "1:2", "3:2", "2:3", "4:5", "5:4"]}], "magic_prompt_option": ["COMBO", {"tooltip": "Determine if MagicPrompt should be used in generation", "advanced": true, "default": "AUTO", "multiselect": false, "options": ["AUTO", "ON", "OFF"]}], "seed": ["INT", {"default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "negative_prompt": ["STRING", {"tooltip": "Description of what to exclude from the image", "default": "", "multiline": true}], "num_images": ["INT", {"default": 1, "min": 1, "max": 8, 
"step": 1, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "turbo"], "optional": ["aspect_ratio", "magic_prompt_option", "seed", "negative_prompt", "num_images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "IdeogramV1", "display_name": "Ideogram V1", "description": "Generates images using the Ideogram V1 model.", "python_module": "comfy_api_nodes.nodes_ideogram", "category": "api node/image/Ideogram", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "num_images", "type": "INT"}, {"name": "turbo", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $n := widgets.num_images;\n $base := (widgets.turbo = true) ? 0.0286 : 0.0858;\n {\"type\":\"usd\",\"usd\": $round($base * $n, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "IdeogramV2": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "turbo": ["BOOLEAN", {"tooltip": "Whether to use turbo mode (faster generation, potentially lower quality)", "default": false}]}, "optional": {"aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to AUTO.", "default": "1:1", "multiselect": false, "options": ["1:1", "4:3", "3:4", "16:9", "9:16", "2:1", "1:2", "3:2", "2:3", "4:5", "5:4"]}], "resolution": ["COMBO", {"tooltip": "The resolution for image generation. 
If not set to AUTO, this overrides the aspect_ratio setting.", "default": "Auto", "multiselect": false, "options": ["Auto", "512 x 1536", "576 x 1408", "576 x 1472", "576 x 1536", "640 x 1024", "640 x 1344", "640 x 1408", "640 x 1472", "640 x 1536", "704 x 1152", "704 x 1216", "704 x 1280", "704 x 1344", "704 x 1408", "704 x 1472", "720 x 1280", "736 x 1312", "768 x 1024", "768 x 1088", "768 x 1152", "768 x 1216", "768 x 1232", "768 x 1280", "768 x 1344", "832 x 960", "832 x 1024", "832 x 1088", "832 x 1152", "832 x 1216", "832 x 1248", "864 x 1152", "896 x 960", "896 x 1024", "896 x 1088", "896 x 1120", "896 x 1152", "960 x 832", "960 x 896", "960 x 1024", "960 x 1088", "1024 x 640", "1024 x 768", "1024 x 832", "1024 x 896", "1024 x 960", "1024 x 1024", "1088 x 768", "1088 x 832", "1088 x 896", "1088 x 960", "1120 x 896", "1152 x 704", "1152 x 768", "1152 x 832", "1152 x 864", "1152 x 896", "1216 x 704", "1216 x 768", "1216 x 832", "1232 x 768", "1248 x 832", "1280 x 704", "1280 x 720", "1280 x 768", "1280 x 800", "1312 x 736", "1344 x 640", "1344 x 704", "1344 x 768", "1408 x 576", "1408 x 640", "1408 x 704", "1472 x 576", "1472 x 640", "1472 x 704", "1536 x 512", "1536 x 576", "1536 x 640"]}], "magic_prompt_option": ["COMBO", {"tooltip": "Determine if MagicPrompt should be used in generation", "advanced": true, "default": "AUTO", "multiselect": false, "options": ["AUTO", "ON", "OFF"]}], "seed": ["INT", {"default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "style_type": ["COMBO", {"tooltip": "Style type for generation (V2 only)", "advanced": true, "default": "NONE", "multiselect": false, "options": ["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"]}], "negative_prompt": ["STRING", {"tooltip": "Description of what to exclude from the image", "default": "", "multiline": true}], "num_images": ["INT", {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}]}, "hidden": 
{"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "turbo"], "optional": ["aspect_ratio", "resolution", "magic_prompt_option", "seed", "style_type", "negative_prompt", "num_images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "IdeogramV2", "display_name": "Ideogram V2", "description": "Generates images using the Ideogram V2 model.", "python_module": "comfy_api_nodes.nodes_ideogram", "category": "api node/image/Ideogram", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "num_images", "type": "INT"}, {"name": "turbo", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $n := widgets.num_images;\n $base := (widgets.turbo = true) ? 0.0715 : 0.1144;\n {\"type\":\"usd\",\"usd\": $round($base * $n, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "IdeogramV3": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation or editing", "default": "", "multiline": true}]}, "optional": {"image": ["IMAGE", {"tooltip": "Optional reference image for image editing."}], "mask": ["MASK", {"tooltip": "Optional mask for inpainting (white areas will be replaced)"}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio for image generation. Ignored if resolution is not set to Auto.", "default": "1:1", "multiselect": false, "options": ["1:3", "3:1", "1:2", "2:1", "9:16", "16:9", "10:16", "16:10", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "1:1"]}], "resolution": ["COMBO", {"tooltip": "The resolution for image generation. 
If not set to Auto, this overrides the aspect_ratio setting.", "default": "Auto", "multiselect": false, "options": ["Auto", "512x1536", "576x1408", "576x1472", "576x1536", "640x1344", "640x1408", "640x1472", "640x1536", "704x1152", "704x1216", "704x1280", "704x1344", "704x1408", "704x1472", "736x1312", "768x1088", "768x1216", "768x1280", "768x1344", "800x1280", "832x960", "832x1024", "832x1088", "832x1152", "832x1216", "832x1248", "864x1152", "896x960", "896x1024", "896x1088", "896x1120", "896x1152", "960x832", "960x896", "960x1024", "960x1088", "1024x832", "1024x896", "1024x960", "1024x1024", "1088x768", "1088x832", "1088x896", "1088x960", "1120x896", "1152x704", "1152x832", "1152x864", "1152x896", "1216x704", "1216x768", "1216x832", "1248x832", "1280x704", "1280x768", "1280x800", "1312x736", "1344x640", "1344x704", "1344x768", "1408x576", "1408x640", "1408x704", "1472x576", "1472x640", "1472x704", "1536x512", "1536x576", "1536x640"]}], "magic_prompt_option": ["COMBO", {"tooltip": "Determine if MagicPrompt should be used in generation", "advanced": true, "default": "AUTO", "multiselect": false, "options": ["AUTO", "ON", "OFF"]}], "seed": ["INT", {"default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "num_images": ["INT", {"default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}], "rendering_speed": ["COMBO", {"tooltip": "Controls the trade-off between generation speed and quality", "advanced": true, "default": "DEFAULT", "multiselect": false, "options": ["DEFAULT", "TURBO", "QUALITY"]}], "character_image": ["IMAGE", {"tooltip": "Image to use as character reference."}], "character_mask": ["MASK", {"tooltip": "Optional mask for character reference image."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt"], "optional": ["image", "mask", "aspect_ratio", "resolution", 
"magic_prompt_option", "seed", "num_images", "rendering_speed", "character_image", "character_mask"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "IdeogramV3", "display_name": "Ideogram V3", "description": "Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask.", "python_module": "comfy_api_nodes.nodes_ideogram", "category": "api node/image/Ideogram", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "rendering_speed", "type": "COMBO"}, {"name": "num_images", "type": "INT"}], "inputs": ["character_image"], "input_groups": []}, "expr": "\n (\n $n := widgets.num_images;\n $speed := widgets.rendering_speed;\n $hasChar := inputs.character_image.connected;\n $base :=\n $contains($speed,\"quality\") ? ($hasChar ? 0.286 : 0.1287) :\n $contains($speed,\"default\") ? ($hasChar ? 0.2145 : 0.0858) :\n $contains($speed,\"turbo\") ? ($hasChar ? 0.143 : 0.0429) :\n 0.0858;\n {\"type\":\"usd\",\"usd\": $round($base * $n, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingCameraControls": {"input": {"required": {"camera_control_type": ["COMBO", {"multiselect": false, "options": ["simple", "down_back", "forward_up", "right_turn_forward", "left_turn_forward"]}], "horizontal_movement": ["FLOAT", {"tooltip": "Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right", "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}], "vertical_movement": ["FLOAT", {"tooltip": "Controls camera's movement along vertical axis (y-axis). 
Negative indicates downward, positive indicates upward.", "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}], "pan": ["FLOAT", {"tooltip": "Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", "default": 0.5, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}], "tilt": ["FLOAT", {"tooltip": "Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}], "roll": ["FLOAT", {"tooltip": "Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}], "zoom": ["FLOAT", {"tooltip": "Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", "default": 0.0, "min": -10.0, "max": 10.0, "step": 0.25, "display": "slider"}]}}, "input_order": {"required": ["camera_control_type", "horizontal_movement", "vertical_movement", "pan", "tilt", "roll", "zoom"]}, "is_input_list": false, "output": ["CAMERA_CONTROL"], "output_is_list": [false], "output_name": ["camera_control"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingCameraControls", "display_name": "Kling Camera Controls", "description": "Allows specifying configuration options for Kling Camera Controls and motion control effects.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingTextToVideoNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], 
"negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "cfg_scale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "mode": ["COMBO", {"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", "default": "pro mode / 5s duration / kling-v2-5-turbo", "multiselect": false, "options": ["standard mode / 5s duration / kling-v1-6", "standard mode / 10s duration / kling-v1-6", "pro mode / 5s duration / kling-v2-master", "pro mode / 10s duration / kling-v2-master", "standard mode / 5s duration / kling-v2-master", "standard mode / 10s duration / kling-v2-master", "pro mode / 5s duration / kling-v2-1-master", "pro mode / 10s duration / kling-v2-1-master", "pro mode / 5s duration / kling-v2-5-turbo", "pro mode / 10s duration / kling-v2-5-turbo"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "cfg_scale", "aspect_ratio", "mode"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingTextToVideoNode", "display_name": "Kling Text to Video", "description": "Kling Text to Video Node", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.mode;\n $contains($m,\"v2-5-turbo\")\n ? 
($contains($m,\"10\") ? {\"type\":\"usd\",\"usd\":0.7} : {\"type\":\"usd\",\"usd\":0.35})\n : $contains($m,\"v2-1-master\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":2.8} : {\"type\":\"usd\",\"usd\":1.4})\n : $contains($m,\"v2-master\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":2.8} : {\"type\":\"usd\",\"usd\":1.4})\n : $contains($m,\"v1-6\")\n ? (\n $contains($m,\"pro\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.56} : {\"type\":\"usd\",\"usd\":0.28})\n )\n : $contains($m,\"v1\")\n ? (\n $contains($m,\"pro\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.28} : {\"type\":\"usd\",\"usd\":0.14})\n )\n : {\"type\":\"usd\",\"usd\":0.14}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingImage2VideoNode": {"input": {"required": {"start_frame": ["IMAGE", {"tooltip": "The reference image used to generate the video."}], "prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "model_name": ["COMBO", {"default": "kling-v2-master", "multiselect": false, "options": ["kling-v1", "kling-v1-5", "kling-v1-6", "kling-v2-master", "kling-v2-1", "kling-v2-1-master", "kling-v2-5-turbo"]}], "cfg_scale": ["FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0}], "mode": ["COMBO", {"default": "std", "multiselect": false, "options": ["std", "pro"]}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "duration": ["COMBO", {"default": "5", "multiselect": false, "options": ["5", "10"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": 
{"required": ["start_frame", "prompt", "negative_prompt", "model_name", "cfg_scale", "mode", "aspect_ratio", "duration"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingImage2VideoNode", "display_name": "Kling Image(First Frame) to Video", "description": "", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := widgets.mode;\n $model := widgets.model_name;\n $dur := widgets.duration;\n $contains($model,\"v2-5-turbo\")\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.7} : {\"type\":\"usd\",\"usd\":0.35})\n : ($contains($model,\"v2-1-master\") or $contains($model,\"v2-master\"))\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":2.8} : {\"type\":\"usd\",\"usd\":1.4})\n : ($contains($model,\"v2-1\") or $contains($model,\"v1-6\") or $contains($model,\"v1-5\"))\n ? (\n $contains($mode,\"pro\")\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.56} : {\"type\":\"usd\",\"usd\":0.28})\n )\n : $contains($model,\"v1\")\n ? (\n $contains($mode,\"pro\")\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($dur,\"10\") ? 
{\"type\":\"usd\",\"usd\":0.28} : {\"type\":\"usd\",\"usd\":0.14})\n )\n : {\"type\":\"usd\",\"usd\":0.14}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingCameraControlI2VNode": {"input": {"required": {"start_frame": ["IMAGE", {"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."}], "prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "cfg_scale": ["FLOAT", {"default": 0.75, "min": 0.0, "max": 1.0}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "camera_control": ["CAMERA_CONTROL", {"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["start_frame", "prompt", "negative_prompt", "cfg_scale", "aspect_ratio", "camera_control"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingCameraControlI2VNode", "display_name": "Kling Image to Video (Camera Control)", "description": "Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. 
Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.49}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingCameraControlT2VNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "cfg_scale": ["FLOAT", {"default": 0.75, "min": 0.0, "max": 1.0}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "camera_control": ["CAMERA_CONTROL", {"tooltip": "Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "cfg_scale", "aspect_ratio", "camera_control"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingCameraControlT2VNode", "display_name": "Kling Text to Video (Camera Control)", "description": "Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. 
Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.14}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingStartEndFrameNode": {"input": {"required": {"start_frame": ["IMAGE", {"tooltip": "Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix."}], "end_frame": ["IMAGE", {"tooltip": "Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix."}], "prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "cfg_scale": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "mode": ["COMBO", {"tooltip": "The configuration to use for the video generation following the format: mode / duration / model_name.", "default": "pro mode / 5s duration / kling-v2-5-turbo", "multiselect": false, "options": ["pro mode / 5s duration / kling-v1-5", "pro mode / 10s duration / kling-v1-5", "pro mode / 5s duration / kling-v1-6", "pro mode / 10s duration / kling-v1-6", "pro mode / 5s duration / kling-v2-1", "pro mode / 10s duration / kling-v2-1", "pro mode / 5s duration / kling-v2-5-turbo", "pro mode / 10s duration / kling-v2-5-turbo"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], 
"api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["start_frame", "end_frame", "prompt", "negative_prompt", "cfg_scale", "aspect_ratio", "mode"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingStartEndFrameNode", "display_name": "Kling Start-End Frame to Video", "description": "Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.mode;\n $contains($m,\"v2-5-turbo\")\n ? ($contains($m,\"10\") ? {\"type\":\"usd\",\"usd\":0.7} : {\"type\":\"usd\",\"usd\":0.35})\n : $contains($m,\"v2-1\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : $contains($m,\"v2-master\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":2.8} : {\"type\":\"usd\",\"usd\":1.4})\n : $contains($m,\"v1-6\")\n ? (\n $contains($m,\"pro\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.56} : {\"type\":\"usd\",\"usd\":0.28})\n )\n : $contains($m,\"v1\")\n ? (\n $contains($m,\"pro\")\n ? ($contains($m,\"10s\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($m,\"10s\") ? 
{\"type\":\"usd\",\"usd\":0.28} : {\"type\":\"usd\",\"usd\":0.14})\n )\n : {\"type\":\"usd\",\"usd\":0.14}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingVideoExtendNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Positive text prompt for guiding the video extension", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt for elements to avoid in the extended video", "multiline": true}], "cfg_scale": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0}], "video_id": ["STRING", {"tooltip": "The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.", "forceInput": true, "multiline": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "cfg_scale", "video_id"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingVideoExtendNode", "display_name": "Kling Video Extend", "description": "Kling Video Extend Node. Extend videos made by other Kling nodes. 
The video_id is created by using other Kling Nodes.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.28}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingLipSyncAudioToVideoNode": {"input": {"required": {"video": ["VIDEO", {}], "audio": ["AUDIO", {}], "voice_language": ["COMBO", {"default": "en", "multiselect": false, "options": ["zh", "en"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["video", "audio", "voice_language"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingLipSyncAudioToVideoNode", "display_name": "Kling Lip Sync Video with Audio", "description": "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. 
The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.1,\"format\":{\"approximate\":true}}"}, "search_aliases": null, "essentials_category": "Video Generation", "has_intermediate_output": false}, "KlingLipSyncTextToVideoNode": {"input": {"required": {"video": ["VIDEO", {}], "text": ["STRING", {"tooltip": "Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.", "multiline": true}], "voice": ["COMBO", {"default": "Melody", "multiselect": false, "options": ["Melody", "Sunny", "Sage", "Ace", "Blossom", "Peppy", "Dove", "Shine", "Anchor", "Lyric", "Tender", "Siren", "Zippy", "Bud", "Sprite", "Candy", "Beacon", "Rock", "Titan", "Grace", "Helen", "Lore", "Crag", "Prattle", "Hearth", "The Reader", "Commercial Lady", "\u9633\u5149\u5c11\u5e74", "\u61c2\u4e8b\u5c0f\u5f1f", "\u8fd0\u52a8\u5c11\u5e74", "\u9752\u6625\u5c11\u5973", "\u6e29\u67d4\u5c0f\u59b9", "\u5143\u6c14\u5c11\u5973", "\u9633\u5149\u7537\u751f", "\u5e7d\u9ed8\u5c0f\u54e5", "\u6587\u827a\u5c0f\u54e5", "\u751c\u7f8e\u90bb\u5bb6", "\u6e29\u67d4\u59d0\u59d0", "\u804c\u573a\u5973\u9752", "\u6d3b\u6cfc\u7537\u7ae5", "\u4fcf\u76ae\u5973\u7ae5", "\u7a33\u91cd\u8001\u7238", "\u6e29\u67d4\u5988\u5988", "\u4e25\u8083\u4e0a\u53f8", "\u4f18\u96c5\u8d35\u5987", "\u6148\u7965\u7237\u7237", "\u5520\u53e8\u7237\u7237", "\u5520\u53e8\u5976\u5976", "\u548c\u853c\u5976\u5976", "\u4e1c\u5317\u8001\u94c1", "\u91cd\u5e86\u5c0f\u4f19", "\u56db\u5ddd\u59b9\u5b50", "\u6f6e\u6c55\u5927\u53d4", "\u53f0\u6e7e\u7537\u751f", "\u897f\u5b89\u638c\u67dc", 
"\u5929\u6d25\u59d0\u59d0", "\u65b0\u95fb\u64ad\u62a5\u7537", "\u8bd1\u5236\u7247\u7537", "\u6492\u5a07\u5973\u53cb", "\u5200\u7247\u70df\u55d3", "\u4e56\u5de7\u6b63\u592a"]}], "voice_speed": ["FLOAT", {"tooltip": "Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.", "advanced": true, "default": 1, "min": 0.8, "max": 2.0, "display": "slider"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["video", "text", "voice", "voice_speed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingLipSyncTextToVideoNode", "display_name": "Kling Lip Sync Video with Text", "description": "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. 
The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.1,\"format\":{\"approximate\":true}}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingVirtualTryOnNode": {"input": {"required": {"human_image": ["IMAGE", {}], "cloth_image": ["IMAGE", {}], "model_name": ["COMBO", {"default": "kolors-virtual-try-on-v1", "multiselect": false, "options": ["kolors-virtual-try-on-v1", "kolors-virtual-try-on-v1-5"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["human_image", "cloth_image", "model_name"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingVirtualTryOnNode", "display_name": "Kling Virtual Try On", "description": "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. 
You can merge multiple clothing item pictures into one image with a white background.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/image/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.7}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingImageGenerationNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Positive text prompt", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt", "multiline": true}], "image_type": ["COMBO", {"advanced": true, "multiselect": false, "options": ["subject", "face"]}], "image_fidelity": ["FLOAT", {"tooltip": "Reference intensity for user-uploaded images", "advanced": true, "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "human_fidelity": ["FLOAT", {"tooltip": "Subject reference similarity", "advanced": true, "default": 0.45, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}], "model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3", "kling-v2", "kling-v1-5"]}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["16:9", "9:16", "1:1", "4:3", "3:4", "3:2", "2:3", "21:9"]}], "n": ["INT", {"tooltip": "Number of generated images", "default": 1, "min": 1, "max": 9}]}, "optional": {"image": ["IMAGE", {}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "image_type", "image_fidelity", 
"human_fidelity", "model_name", "aspect_ratio", "n"], "optional": ["image", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingImageGenerationNode", "display_name": "Kling 3.0 Image", "description": "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/image/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model_name", "type": "COMBO"}, {"name": "n", "type": "INT"}], "inputs": ["image"], "input_groups": []}, "expr": "\n (\n $m := widgets.model_name;\n $base :=\n $contains($m,\"kling-v1-5\")\n ? (inputs.image.connected ? 0.028 : 0.014)\n : $contains($m,\"kling-v3\") ? 0.028 : 0.014;\n {\"type\":\"usd\",\"usd\": $base * widgets.n}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingSingleImageVideoEffectNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": " Reference Image. URL or Base64 encoded string (without data:image prefix). 
File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"}], "effect_scene": ["COMBO", {"multiselect": false, "options": ["bloombloom", "dizzydizzy", "fuzzyfuzzy", "squish", "expansion"]}], "model_name": ["COMBO", {"multiselect": false, "options": ["kling-v1-6"]}], "duration": ["COMBO", {"multiselect": false, "options": ["5", "10"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "effect_scene", "model_name", "duration"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["VIDEO", "video_id", "duration"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "KlingSingleImageVideoEffectNode", "display_name": "Kling Video Effects", "description": "Achieve different special effects when generating a video based on the effect_scene.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "effect_scene", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n ($contains(widgets.effect_scene,\"dizzydizzy\") or $contains(widgets.effect_scene,\"bloombloom\"))\n ? 
{\"type\":\"usd\",\"usd\":0.49}\n : {\"type\":\"usd\",\"usd\":0.28}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingDualCharacterVideoEffectNode": {"input": {"required": {"image_left": ["IMAGE", {"tooltip": "Left side image"}], "image_right": ["IMAGE", {"tooltip": "Right side image"}], "effect_scene": ["COMBO", {"multiselect": false, "options": ["hug", "kiss", "heart_gesture"]}], "model_name": ["COMBO", {"default": "kling-v1", "multiselect": false, "options": ["kling-v1", "kling-v1-5", "kling-v1-6"]}], "mode": ["COMBO", {"default": "std", "multiselect": false, "options": ["std", "pro"]}], "duration": ["COMBO", {"multiselect": false, "options": ["5", "10"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image_left", "image_right", "effect_scene", "model_name", "mode", "duration"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO", "STRING"], "output_is_list": [false, false], "output_name": ["VIDEO", "duration"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "KlingDualCharacterVideoEffectNode", "display_name": "Kling Dual Character Video Effects", "description": "Achieve different special effects when generating a video based on the effect_scene. 
First image will be positioned on left side, second on right side of the composite.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := widgets.mode;\n $model := widgets.model_name;\n $dur := widgets.duration;\n ($contains($model,\"v1-6\") or $contains($model,\"v1-5\"))\n ? (\n $contains($mode,\"pro\")\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.56} : {\"type\":\"usd\",\"usd\":0.28})\n )\n : $contains($model,\"v1\")\n ? (\n $contains($mode,\"pro\")\n ? ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.98} : {\"type\":\"usd\",\"usd\":0.49})\n : ($contains($dur,\"10\") ? {\"type\":\"usd\",\"usd\":0.28} : {\"type\":\"usd\",\"usd\":0.14})\n )\n : {\"type\":\"usd\",\"usd\":0.14}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingOmniProTextToVideoNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-video-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the video content. This can include both positive and negative descriptions. 
Ignored when storyboards are enabled.", "multiline": true}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "duration": ["INT", {"default": 5, "min": 3, "max": 15, "display": "slider"}]}, "optional": {"resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "storyboards": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Generate a series of video segments with individual prompts and durations. Ignored for o1 model.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "1 storyboard", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "2 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "3 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "4 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "5 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "6 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_6_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 6. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_6_duration": ["INT", {"tooltip": "Duration for storyboard segment 6 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}]}], "generate_audio": ["BOOLEAN", {"default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "aspect_ratio", "duration"], "optional": ["resolution", "storyboards", "generate_audio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingOmniProTextToVideoNode", "display_name": "Kling 3.0 Omni Text to Video", "description": "Use text prompts to generate videos with the latest Kling model.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := (widgets.resolution = \"720p\") ? \"std\" : \"pro\";\n $isV3 := $contains(widgets.model_name, \"v3\");\n $audio := $isV3 and widgets.generate_audio;\n $rates := $audio\n ? 
{\"std\": 0.112, \"pro\": 0.14}\n : {\"std\": 0.084, \"pro\": 0.112};\n {\"type\":\"usd\",\"usd\": $lookup($rates, $mode) * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingOmniProFirstLastFrameNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-video-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the video content. This can include both positive and negative descriptions. Ignored when storyboards are enabled.", "multiline": true}], "duration": ["INT", {"default": 5, "min": 3, "max": 15, "display": "slider"}], "first_frame": ["IMAGE", {}]}, "optional": {"end_frame": ["IMAGE", {"tooltip": "An optional end frame for the video. This cannot be used simultaneously with 'reference_images'. Does not work with storyboards."}], "reference_images": ["IMAGE", {"tooltip": "Up to 6 additional reference images."}], "resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "storyboards": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Generate a series of video segments with individual prompts and durations. Only supported for kling-v3-omni.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "1 storyboard", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "2 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "3 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "4 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "5 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "6 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_6_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 6. Max 512 characters.", "default": "", "multiline": true}], "storyboard_6_duration": ["INT", {"tooltip": "Duration for storyboard segment 6 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}]}], "generate_audio": ["BOOLEAN", {"tooltip": "Generate audio for the video. Only supported for kling-v3-omni.", "default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "duration", "first_frame"], "optional": ["end_frame", "reference_images", "resolution", "storyboards", "generate_audio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingOmniProFirstLastFrameNode", "display_name": "Kling 3.0 Omni First-Last-Frame to Video", "description": "Use a start frame, an optional end frame, or reference images with the latest Kling model.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}, {"name": 
"generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := (widgets.resolution = \"720p\") ? \"std\" : \"pro\";\n $isV3 := $contains(widgets.model_name, \"v3\");\n $audio := $isV3 and widgets.generate_audio;\n $rates := $audio\n ? {\"std\": 0.112, \"pro\": 0.14}\n : {\"std\": 0.084, \"pro\": 0.112};\n {\"type\":\"usd\",\"usd\": $lookup($rates, $mode) * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingOmniProImageToVideoNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-video-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the video content. This can include both positive and negative descriptions. Ignored when storyboards are enabled.", "multiline": true}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "duration": ["INT", {"default": 5, "min": 3, "max": 15, "display": "slider"}], "reference_images": ["IMAGE", {"tooltip": "Up to 7 reference images."}]}, "optional": {"resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "storyboards": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Generate a series of video segments with individual prompts and durations. Only supported for kling-v3-omni.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "1 storyboard", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "2 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "3 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "4 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "5 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "6 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_6_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 6. Max 512 characters.", "default": "", "multiline": true}], "storyboard_6_duration": ["INT", {"tooltip": "Duration for storyboard segment 6 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}]}], "generate_audio": ["BOOLEAN", {"tooltip": "Generate audio for the video. Only supported for kling-v3-omni.", "default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "aspect_ratio", "duration", "reference_images"], "optional": ["resolution", "storyboards", "generate_audio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingOmniProImageToVideoNode", "display_name": "Kling 3.0 Omni Image to Video", "description": "Use up to 7 reference images to generate a video with the latest Kling model.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], 
"inputs": [], "input_groups": []}, "expr": "\n (\n $mode := (widgets.resolution = \"720p\") ? \"std\" : \"pro\";\n $isV3 := $contains(widgets.model_name, \"v3\");\n $audio := $isV3 and widgets.generate_audio;\n $rates := $audio\n ? {\"std\": 0.112, \"pro\": 0.14}\n : {\"std\": 0.084, \"pro\": 0.112};\n {\"type\":\"usd\",\"usd\": $lookup($rates, $mode) * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingOmniProVideoToVideoNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-video-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the video content. This can include both positive and negative descriptions.", "multiline": true}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "duration": ["INT", {"default": 3, "min": 3, "max": 10, "display": "slider"}], "reference_video": ["VIDEO", {"tooltip": "Video to use as a reference."}], "keep_original_sound": ["BOOLEAN", {"default": true}]}, "optional": {"reference_images": ["IMAGE", {"tooltip": "Up to 4 additional reference images."}], "resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "aspect_ratio", "duration", "reference_video", "keep_original_sound"], "optional": ["reference_images", "resolution", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], 
"output_matchtypes": null, "name": "KlingOmniProVideoToVideoNode", "display_name": "Kling 3.0 Omni Video to Video", "description": "Use a video and up to 4 reference images to generate a video with the latest Kling model.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := (widgets.resolution = \"720p\") ? \"std\" : \"pro\";\n $rates := {\"std\": 0.126, \"pro\": 0.168};\n {\"type\":\"usd\",\"usd\": $lookup($rates, $mode) * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingOmniProEditVideoNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-video-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the video content. This can include both positive and negative descriptions.", "multiline": true}], "video": ["VIDEO", {"tooltip": "Video for editing. 
The output video length will be the same."}], "keep_original_sound": ["BOOLEAN", {"default": true}]}, "optional": {"reference_images": ["IMAGE", {"tooltip": "Up to 4 additional reference images."}], "resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "video", "keep_original_sound"], "optional": ["reference_images", "resolution", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingOmniProEditVideoNode", "display_name": "Kling 3.0 Omni Edit Video", "description": "Edit an existing video with the latest model from Kling.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mode := (widgets.resolution = \"720p\") ? 
\"std\" : \"pro\";\n $rates := {\"std\": 0.126, \"pro\": 0.168};\n {\"type\":\"usd\",\"usd\": $lookup($rates, $mode), \"format\":{\"suffix\":\"/second\"}}\n )\n "}, "search_aliases": null, "essentials_category": "Video Generation", "has_intermediate_output": false}, "KlingOmniProImageNode": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v3-omni", "kling-image-o1"]}], "prompt": ["STRING", {"tooltip": "A text prompt describing the image content. This can include both positive and negative descriptions.", "multiline": true}], "resolution": ["COMBO", {"multiselect": false, "options": ["1K", "2K", "4K"]}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1", "4:3", "3:4", "3:2", "2:3", "21:9"]}], "series_amount": ["COMBO", {"tooltip": "Generate a series of images. Not supported for kling-image-o1.", "multiselect": false, "options": ["disabled", "2", "3", "4", "5", "6", "7", "8", "9"]}]}, "optional": {"reference_images": ["IMAGE", {"tooltip": "Up to 10 additional reference images."}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "resolution", "aspect_ratio", "series_amount"], "optional": ["reference_images", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingOmniProImageNode", "display_name": "Kling 3.0 Omni Image", "description": "Create or edit images with the latest model from Kling.", "python_module": "comfy_api_nodes.nodes_kling", 
"category": "api node/image/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "resolution", "type": "COMBO"}, {"name": "series_amount", "type": "COMBO"}, {"name": "model_name", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"1K\": 0.028, \"2K\": 0.028, \"4K\": 0.056};\n $base := $lookup($prices, widgets.resolution);\n $isO1 := widgets.model_name = \"kling-image-o1\";\n $mult := ($isO1 or widgets.series_amount = \"disabled\") ? 1 : $number(widgets.series_amount);\n {\"type\":\"usd\",\"usd\": $base * $mult}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingTextToVideoWithAudio": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v2-6"]}], "prompt": ["STRING", {"tooltip": "Positive text prompt.", "multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["pro"]}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "duration": ["COMBO", {"multiselect": false, "options": [5, 10]}], "generate_audio": ["BOOLEAN", {"advanced": true, "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "prompt", "mode", "aspect_ratio", "duration", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingTextToVideoWithAudio", "display_name": "Kling 2.6 Text to Video with Audio", "description": "", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, 
"dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.07 * widgets.duration * (widgets.generate_audio ? 2 : 1)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingImageToVideoWithAudio": {"input": {"required": {"model_name": ["COMBO", {"multiselect": false, "options": ["kling-v2-6"]}], "start_frame": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Positive text prompt.", "multiline": true}], "mode": ["COMBO", {"multiselect": false, "options": ["pro"]}], "duration": ["COMBO", {"multiselect": false, "options": [5, 10]}], "generate_audio": ["BOOLEAN", {"advanced": true, "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model_name", "start_frame", "prompt", "mode", "duration", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingImageToVideoWithAudio", "display_name": "Kling 2.6 Image (First Frame) to Video with Audio", "description": "", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.07 * widgets.duration * (widgets.generate_audio ? 
2 : 1)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingMotionControl": {"input": {"required": {"prompt": ["STRING", {"multiline": true}], "reference_image": ["IMAGE", {}], "reference_video": ["VIDEO", {"tooltip": "Motion reference video used to drive movement/expression.\nDuration limits depend on character_orientation:\n - image: 3\u201310s (max 10s)\n - video: 3\u201330s (max 30s)"}], "keep_original_sound": ["BOOLEAN", {"default": true}], "character_orientation": ["COMBO", {"tooltip": "Controls where the character's facing/orientation comes from.\nvideo: movements, expressions, camera moves, and orientation follow the motion reference video (other details via prompt).\nimage: movements and expressions still follow the motion reference video, but the character orientation matches the reference image (camera/other details via prompt).", "multiselect": false, "options": ["video", "image"]}], "mode": ["COMBO", {"multiselect": false, "options": ["pro", "std"]}]}, "optional": {"model": ["COMBO", {"multiselect": false, "options": ["kling-v3", "kling-v2-6"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "reference_image", "reference_video", "keep_original_sound", "character_orientation", "mode"], "optional": ["model"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingMotionControl", "display_name": "Kling Motion Control", "description": "", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", 
"type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"std\": 0.07, \"pro\": 0.112};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.mode), \"format\":{\"suffix\":\"/second\"}}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingVideoNode": {"input": {"required": {"multi_shot": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Generate a series of video segments with individual prompts and durations.", "options": [{"key": "disabled", "inputs": {"required": {"prompt": ["STRING", {"default": "", "multiline": true}], "negative_prompt": ["STRING", {"default": "", "multiline": true}], "duration": ["INT", {"default": 5, "min": 3, "max": 15, "display": "slider"}]}}}, {"key": "1 storyboard", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "2 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "3 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "4 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "5 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}, {"key": "6 storyboards", "inputs": {"required": {"storyboard_1_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 1. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_1_duration": ["INT", {"tooltip": "Duration for storyboard segment 1 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_2_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 2. Max 512 characters.", "default": "", "multiline": true}], "storyboard_2_duration": ["INT", {"tooltip": "Duration for storyboard segment 2 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_3_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 3. Max 512 characters.", "default": "", "multiline": true}], "storyboard_3_duration": ["INT", {"tooltip": "Duration for storyboard segment 3 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_4_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 4. Max 512 characters.", "default": "", "multiline": true}], "storyboard_4_duration": ["INT", {"tooltip": "Duration for storyboard segment 4 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_5_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 5. Max 512 characters.", "default": "", "multiline": true}], "storyboard_5_duration": ["INT", {"tooltip": "Duration for storyboard segment 5 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}], "storyboard_6_prompt": ["STRING", {"tooltip": "Prompt for storyboard segment 6. 
Max 512 characters.", "default": "", "multiline": true}], "storyboard_6_duration": ["INT", {"tooltip": "Duration for storyboard segment 6 in seconds.", "default": 4, "min": 1, "max": 15, "display": "slider"}]}}}]}], "generate_audio": ["BOOLEAN", {"default": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model and generation settings.", "options": [{"key": "kling-v3", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}], "aspect_ratio": ["COMBO", {"tooltip": "Ignored in image-to-video mode.", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}]}}}]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "optional": {"start_frame": ["IMAGE", {"tooltip": "Optional start frame image. When connected, switches to image-to-video mode."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["multi_shot", "generate_audio", "model", "seed"], "optional": ["start_frame"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingVideoNode", "display_name": "Kling 3.0 Video", "description": "Generate videos with Kling V3. 
Supports text-to-video and image-to-video with optional storyboard multi-prompt and audio generation.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model.resolution", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}, {"name": "multi_shot", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "multi_shot.duration", "type": "INT"}, {"name": "multi_shot.storyboard_1_duration", "type": "INT"}, {"name": "multi_shot.storyboard_2_duration", "type": "INT"}, {"name": "multi_shot.storyboard_3_duration", "type": "INT"}, {"name": "multi_shot.storyboard_4_duration", "type": "INT"}, {"name": "multi_shot.storyboard_5_duration", "type": "INT"}, {"name": "multi_shot.storyboard_6_duration", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $rates := {\"1080p\": {\"off\": 0.112, \"on\": 0.168}, \"720p\": {\"off\": 0.084, \"on\": 0.126}};\n $res := $lookup(widgets, \"model.resolution\");\n $audio := widgets.generate_audio ? \"on\" : \"off\";\n $rate := $lookup($lookup($rates, $res), $audio);\n $ms := widgets.multi_shot;\n $isSb := $ms != \"disabled\";\n $n := $isSb ? $number($substring($ms, 0, 1)) : 0;\n $d1 := $lookup(widgets, \"multi_shot.storyboard_1_duration\");\n $d2 := $n >= 2 ? $lookup(widgets, \"multi_shot.storyboard_2_duration\") : 0;\n $d3 := $n >= 3 ? $lookup(widgets, \"multi_shot.storyboard_3_duration\") : 0;\n $d4 := $n >= 4 ? $lookup(widgets, \"multi_shot.storyboard_4_duration\") : 0;\n $d5 := $n >= 5 ? $lookup(widgets, \"multi_shot.storyboard_5_duration\") : 0;\n $d6 := $n >= 6 ? $lookup(widgets, \"multi_shot.storyboard_6_duration\") : 0;\n $dur := $isSb ? 
$d1 + $d2 + $d3 + $d4 + $d5 + $d6 : $lookup(widgets, \"multi_shot.duration\");\n {\"type\":\"usd\",\"usd\": $rate * $dur}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingFirstLastFrameNode": {"input": {"required": {"prompt": ["STRING", {"default": "", "multiline": true}], "duration": ["INT", {"default": 5, "min": 3, "max": 15, "display": "slider"}], "first_frame": ["IMAGE", {}], "end_frame": ["IMAGE", {}], "generate_audio": ["BOOLEAN", {"default": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model and generation settings.", "options": [{"key": "kling-v3", "inputs": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["1080p", "720p"]}]}}}]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "duration", "first_frame", "end_frame", "generate_audio", "model", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingFirstLastFrameNode", "display_name": "Kling 3.0 First-Last-Frame to Video", "description": "Generate videos with Kling V3 using first and last frames.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model.resolution", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}, {"name": "duration", "type": "INT"}], "inputs": 
[], "input_groups": []}, "expr": "\n (\n $rates := {\"1080p\": {\"off\": 0.112, \"on\": 0.168}, \"720p\": {\"off\": 0.084, \"on\": 0.126}};\n $res := $lookup(widgets, \"model.resolution\");\n $audio := widgets.generate_audio ? \"on\" : \"off\";\n $rate := $lookup($lookup($rates, $res), $audio);\n {\"type\":\"usd\",\"usd\": $rate * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "KlingAvatarNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Avatar reference image. Width and height must be at least 300px. Aspect ratio must be between 1:2.5 and 2.5:1."}], "sound_file": ["AUDIO", {"tooltip": "Audio input. Must be between 2 and 300 seconds in duration."}], "mode": ["COMBO", {"multiselect": false, "options": ["std", "pro"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "optional": {"prompt": ["STRING", {"tooltip": "Optional prompt to define avatar actions, emotions, and camera movements.", "default": "", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "sound_file", "mode", "seed"], "optional": ["prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "KlingAvatarNode", "display_name": "Kling Avatar 2.0", "description": "Generate broadcast-style digital human videos from a single photo and an audio file.", "python_module": "comfy_api_nodes.nodes_kling", "category": "api node/video/Kling", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, 
"price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"std\": 0.056, \"pro\": 0.112};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.mode), \"format\":{\"suffix\":\"/second\"}}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LtxvApiTextToVideo": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["LTX-2 (Pro)", "LTX-2 (Fast)"]}], "prompt": ["STRING", {"default": "", "multiline": true}], "duration": ["COMBO", {"default": 8, "multiselect": false, "options": [6, 8, 10, 12, 14, 16, 18, 20]}], "resolution": ["COMBO", {"multiselect": false, "options": ["1920x1080", "2560x1440", "3840x2160"]}], "fps": ["COMBO", {"default": 25, "multiselect": false, "options": [25, 50]}]}, "optional": {"generate_audio": ["BOOLEAN", {"tooltip": "When true, the generated video will include AI-generated audio matching the scene.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "duration", "resolution", "fps"], "optional": ["generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LtxvApiTextToVideo", "display_name": "LTXV Text To Video", "description": "Professional-quality videos with customizable duration and resolution.", "python_module": "comfy_api_nodes.nodes_ltxv", "category": "api node/video/LTXV", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": 
"COMBO"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"ltx-2 (pro)\": {\"1920x1080\":0.06,\"2560x1440\":0.12,\"3840x2160\":0.24},\n \"ltx-2 (fast)\": {\"1920x1080\":0.04,\"2560x1440\":0.08,\"3840x2160\":0.16}\n };\n $modelPrices := $lookup($prices, $lowercase(widgets.model));\n $pps := $lookup($modelPrices, widgets.resolution);\n {\"type\":\"usd\",\"usd\": $pps * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LtxvApiImageToVideo": {"input": {"required": {"image": ["IMAGE", {"tooltip": "First frame to be used for the video."}], "model": ["COMBO", {"multiselect": false, "options": ["LTX-2 (Pro)", "LTX-2 (Fast)"]}], "prompt": ["STRING", {"default": "", "multiline": true}], "duration": ["COMBO", {"default": 8, "multiselect": false, "options": [6, 8, 10, 12, 14, 16, 18, 20]}], "resolution": ["COMBO", {"multiselect": false, "options": ["1920x1080", "2560x1440", "3840x2160"]}], "fps": ["COMBO", {"default": 25, "multiselect": false, "options": [25, 50]}]}, "optional": {"generate_audio": ["BOOLEAN", {"tooltip": "When true, the generated video will include AI-generated audio matching the scene.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "model", "prompt", "duration", "resolution", "fps"], "optional": ["generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LtxvApiImageToVideo", "display_name": "LTXV Image To Video", "description": "Professional-quality videos with customizable duration and resolution based on start image.", "python_module": "comfy_api_nodes.nodes_ltxv", "category": 
"api node/video/LTXV", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"ltx-2 (pro)\": {\"1920x1080\":0.06,\"2560x1440\":0.12,\"3840x2160\":0.24},\n \"ltx-2 (fast)\": {\"1920x1080\":0.04,\"2560x1440\":0.08,\"3840x2160\":0.16}\n };\n $modelPrices := $lookup($prices, $lowercase(widgets.model));\n $pps := $lookup($modelPrices, widgets.resolution);\n {\"type\":\"usd\",\"usd\": $pps * widgets.duration}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["photon-1", "photon-flash-1"]}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["1:1", "16:9", "9:16", "4:3", "3:4", "21:9", "9:21"]}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "style_image_weight": ["FLOAT", {"tooltip": "Weight of style image. 
Ignored if no style_image provided.", "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"image_luma_ref": ["LUMA_REF", {"tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."}], "style_image": ["IMAGE", {"tooltip": "Style reference image; only 1 image will be used."}], "character_image": ["IMAGE", {"tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "aspect_ratio", "seed", "style_image_weight"], "optional": ["image_luma_ref", "style_image", "character_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaImageNode", "display_name": "Luma Text to Image", "description": "Generates images synchronously based on prompt and aspect ratio.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/image/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $contains($m,\"photon-flash-1\")\n ? {\"type\":\"usd\",\"usd\":0.0027}\n : $contains($m,\"photon-1\")\n ? 
{\"type\":\"usd\",\"usd\":0.0104}\n : {\"type\":\"usd\",\"usd\":0.0246}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaImageModifyNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation", "default": "", "multiline": true}], "image_weight": ["FLOAT", {"tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.", "default": 0.1, "min": 0.0, "max": 0.98, "step": 0.01}], "model": ["COMBO", {"multiselect": false, "options": ["photon-1", "photon-flash-1"]}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "image_weight", "model", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaImageModifyNode", "display_name": "Luma Image to Image", "description": "Modifies images synchronously based on prompt and aspect ratio.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/image/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $contains($m,\"photon-flash-1\")\n ? {\"type\":\"usd\",\"usd\":0.0027}\n : $contains($m,\"photon-1\")\n ? 
{\"type\":\"usd\",\"usd\":0.0104}\n : {\"type\":\"usd\",\"usd\":0.0246}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaVideoNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the video generation", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["ray-2", "ray-flash-2", "ray-1-6"]}], "aspect_ratio": ["COMBO", {"default": "16:9", "multiselect": false, "options": ["1:1", "16:9", "9:16", "4:3", "3:4", "21:9", "9:21"]}], "resolution": ["COMBO", {"default": "540p", "multiselect": false, "options": ["540p", "720p", "1080p", "4k"]}], "duration": ["COMBO", {"multiselect": false, "options": ["5s", "9s"]}], "loop": ["BOOLEAN", {"default": false}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"luma_concepts": ["LUMA_CONCEPTS", {"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "aspect_ratio", "resolution", "duration", "loop", "seed"], "optional": ["luma_concepts"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaVideoNode", "display_name": "Luma Text to Video", "description": "Generates videos synchronously based on prompt and output_size.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/video/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", 
"depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $p := {\n \"ray-flash-2\": {\n \"5s\": {\"4k\":3.13,\"1080p\":0.79,\"720p\":0.34,\"540p\":0.2},\n \"9s\": {\"4k\":5.65,\"1080p\":1.42,\"720p\":0.61,\"540p\":0.36}\n },\n \"ray-2\": {\n \"5s\": {\"4k\":9.11,\"1080p\":2.27,\"720p\":1.02,\"540p\":0.57},\n \"9s\": {\"4k\":16.4,\"1080p\":4.1,\"720p\":1.83,\"540p\":1.03}\n }\n };\n\n $m := widgets.model;\n $d := widgets.duration;\n $r := widgets.resolution;\n\n $modelKey :=\n $contains($m,\"ray-flash-2\") ? \"ray-flash-2\" :\n $contains($m,\"ray-2\") ? \"ray-2\" :\n $contains($m,\"ray-1-6\") ? \"ray-1-6\" :\n \"other\";\n\n $durKey := $contains($d,\"5s\") ? \"5s\" : $contains($d,\"9s\") ? \"9s\" : \"\";\n $resKey :=\n $contains($r,\"4k\") ? \"4k\" :\n $contains($r,\"1080p\") ? \"1080p\" :\n $contains($r,\"720p\") ? \"720p\" :\n $contains($r,\"540p\") ? \"540p\" : \"\";\n\n $modelPrices := $lookup($p, $modelKey);\n $durPrices := $lookup($modelPrices, $durKey);\n $v := $lookup($durPrices, $resKey);\n\n $price :=\n ($modelKey = \"ray-1-6\") ? 0.5 :\n ($modelKey = \"other\") ? 0.79 :\n ($exists($v) ? 
$v : 0.79);\n\n {\"type\":\"usd\",\"usd\": $price}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaImageToVideoNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the video generation", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["ray-2", "ray-flash-2", "ray-1-6"]}], "resolution": ["COMBO", {"default": "540p", "multiselect": false, "options": ["540p", "720p", "1080p", "4k"]}], "duration": ["COMBO", {"multiselect": false, "options": ["5s", "9s"]}], "loop": ["BOOLEAN", {"default": false}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"first_image": ["IMAGE", {"tooltip": "First frame of generated video."}], "last_image": ["IMAGE", {"tooltip": "Last frame of generated video."}], "luma_concepts": ["LUMA_CONCEPTS", {"tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "resolution", "duration", "loop", "seed"], "optional": ["first_image", "last_image", "luma_concepts"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaImageToVideoNode", "display_name": "Luma Image to Video", "description": "Generates videos synchronously based on prompt, input images, and output_size.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/video/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, 
"price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $p := {\n \"ray-flash-2\": {\n \"5s\": {\"4k\":3.13,\"1080p\":0.79,\"720p\":0.34,\"540p\":0.2},\n \"9s\": {\"4k\":5.65,\"1080p\":1.42,\"720p\":0.61,\"540p\":0.36}\n },\n \"ray-2\": {\n \"5s\": {\"4k\":9.11,\"1080p\":2.27,\"720p\":1.02,\"540p\":0.57},\n \"9s\": {\"4k\":16.4,\"1080p\":4.1,\"720p\":1.83,\"540p\":1.03}\n }\n };\n\n $m := widgets.model;\n $d := widgets.duration;\n $r := widgets.resolution;\n\n $modelKey :=\n $contains($m,\"ray-flash-2\") ? \"ray-flash-2\" :\n $contains($m,\"ray-2\") ? \"ray-2\" :\n $contains($m,\"ray-1-6\") ? \"ray-1-6\" :\n \"other\";\n\n $durKey := $contains($d,\"5s\") ? \"5s\" : $contains($d,\"9s\") ? \"9s\" : \"\";\n $resKey :=\n $contains($r,\"4k\") ? \"4k\" :\n $contains($r,\"1080p\") ? \"1080p\" :\n $contains($r,\"720p\") ? \"720p\" :\n $contains($r,\"540p\") ? \"540p\" : \"\";\n\n $modelPrices := $lookup($p, $modelKey);\n $durPrices := $lookup($modelPrices, $durKey);\n $v := $lookup($durPrices, $resKey);\n\n $price :=\n ($modelKey = \"ray-1-6\") ? 0.5 :\n ($modelKey = \"other\") ? 0.79 :\n ($exists($v) ? 
$v : 0.79);\n\n {\"type\":\"usd\",\"usd\": $price}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaReferenceNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Image to use as reference."}], "weight": ["FLOAT", {"tooltip": "Weight of image reference.", "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"luma_ref": ["LUMA_REF", {}]}}, "input_order": {"required": ["image", "weight"], "optional": ["luma_ref"]}, "is_input_list": false, "output": ["LUMA_REF"], "output_is_list": [false], "output_name": ["luma_ref"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaReferenceNode", "display_name": "Luma Reference", "description": "Holds an image and weight for use with Luma Generate Image node.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/image/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "LumaConceptsNode": {"input": {"required": {"concept1": ["COMBO", {"multiselect": false, "options": ["None", "truck_left", "pan_right", "pedestal_down", "low_angle", "pedestal_up", "selfie", "pan_left", "roll_right", "zoom_in", "over_the_shoulder", "orbit_right", "orbit_left", "static", "tiny_planet", "high_angle", "bolt_cam", "dolly_zoom", "overhead", "zoom_out", "handheld", "roll_left", "pov", "aerial_drone", "push_in", "crane_down", "truck_right", "tilt_down", "elevator_doors", "tilt_up", "ground_level", "pull_out", "aerial", "crane_up", "eye_level"]}], "concept2": ["COMBO", {"multiselect": false, "options": ["None", "truck_left", "pan_right", "pedestal_down", "low_angle", "pedestal_up", "selfie", "pan_left", "roll_right", "zoom_in", "over_the_shoulder", "orbit_right", "orbit_left", "static", "tiny_planet", "high_angle", "bolt_cam", "dolly_zoom", "overhead", "zoom_out", "handheld", 
"roll_left", "pov", "aerial_drone", "push_in", "crane_down", "truck_right", "tilt_down", "elevator_doors", "tilt_up", "ground_level", "pull_out", "aerial", "crane_up", "eye_level"]}], "concept3": ["COMBO", {"multiselect": false, "options": ["None", "truck_left", "pan_right", "pedestal_down", "low_angle", "pedestal_up", "selfie", "pan_left", "roll_right", "zoom_in", "over_the_shoulder", "orbit_right", "orbit_left", "static", "tiny_planet", "high_angle", "bolt_cam", "dolly_zoom", "overhead", "zoom_out", "handheld", "roll_left", "pov", "aerial_drone", "push_in", "crane_down", "truck_right", "tilt_down", "elevator_doors", "tilt_up", "ground_level", "pull_out", "aerial", "crane_up", "eye_level"]}], "concept4": ["COMBO", {"multiselect": false, "options": ["None", "truck_left", "pan_right", "pedestal_down", "low_angle", "pedestal_up", "selfie", "pan_left", "roll_right", "zoom_in", "over_the_shoulder", "orbit_right", "orbit_left", "static", "tiny_planet", "high_angle", "bolt_cam", "dolly_zoom", "overhead", "zoom_out", "handheld", "roll_left", "pov", "aerial_drone", "push_in", "crane_down", "truck_right", "tilt_down", "elevator_doors", "tilt_up", "ground_level", "pull_out", "aerial", "crane_up", "eye_level"]}]}, "optional": {"luma_concepts": ["LUMA_CONCEPTS", {"tooltip": "Optional Camera Concepts to add to the ones chosen here."}]}}, "input_order": {"required": ["concept1", "concept2", "concept3", "concept4"], "optional": ["luma_concepts"]}, "is_input_list": false, "output": ["LUMA_CONCEPTS"], "output_is_list": [false], "output_name": ["luma_concepts"], "output_tooltips": [null], "output_matchtypes": null, "name": "LumaConceptsNode", "display_name": "Luma Concepts", "description": "Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.", "python_module": "comfy_api_nodes.nodes_luma", "category": "api node/video/Luma", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, 
"search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MagnificImageUpscalerCreativeNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"default": "", "multiline": true}], "scale_factor": ["COMBO", {"multiselect": false, "options": ["2x", "4x", "8x", "16x"]}], "optimized_for": ["COMBO", {"multiselect": false, "options": ["standard", "soft_portraits", "hard_portraits", "art_n_illustration", "videogame_assets", "nature_n_landscapes", "films_n_photography", "3d_renders", "science_fiction_n_horror"]}], "creativity": ["INT", {"default": 0, "min": -10, "max": 10, "display": "slider"}], "hdr": ["INT", {"tooltip": "The level of definition and detail.", "default": 0, "min": -10, "max": 10, "display": "slider"}], "resemblance": ["INT", {"tooltip": "The level of resemblance to the original image.", "default": 0, "min": -10, "max": 10, "display": "slider"}], "fractality": ["INT", {"tooltip": "The strength of the prompt and intricacy per square pixel.", "default": 0, "min": -10, "max": 10, "display": "slider"}], "engine": ["COMBO", {"advanced": true, "multiselect": false, "options": ["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"]}], "auto_downscale": ["BOOLEAN", {"tooltip": "Automatically downscale input image if output would exceed maximum pixel limit.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "scale_factor", "optimized_for", "creativity", "hdr", "resemblance", "fractality", "engine", "auto_downscale"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MagnificImageUpscalerCreativeNode", "display_name": "Magnific Image Upscale 
(Creative)", "description": "Prompt\u2011guided enhancement, stylization, and 2x/4x/8x/16x upscaling. Maximum output: 25.3 megapixels.", "python_module": "comfy_api_nodes.nodes_magnific", "category": "api node/image/Magnific", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "scale_factor", "type": "COMBO"}, {"name": "auto_downscale", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $ad := widgets.auto_downscale;\n $mins := $ad\n ? {\"2x\": 0.172, \"4x\": 0.343, \"8x\": 0.515, \"16x\": 0.515}\n : {\"2x\": 0.172, \"4x\": 0.343, \"8x\": 0.515, \"16x\": 0.844};\n $maxs := {\"2x\": 0.515, \"4x\": 0.844, \"8x\": 1.015, \"16x\": 1.187};\n {\n \"type\": \"range_usd\",\n \"min_usd\": $lookup($mins, widgets.scale_factor),\n \"max_usd\": $lookup($maxs, widgets.scale_factor),\n \"format\": { \"approximate\": true }\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MagnificImageUpscalerPreciseV2Node": {"input": {"required": {"image": ["IMAGE", {}], "scale_factor": ["COMBO", {"multiselect": false, "options": ["2x", "4x", "8x", "16x"]}], "flavor": ["COMBO", {"tooltip": "Processing style: sublime for general use, photo for photographs, photo_denoiser for noisy photos.", "multiselect": false, "options": ["sublime", "photo", "photo_denoiser"]}], "sharpen": ["INT", {"tooltip": "Image sharpness intensity. 
Higher values increase edge definition and clarity.", "default": 7, "min": 0, "max": 100, "display": "slider"}], "smart_grain": ["INT", {"tooltip": "Intelligent grain/texture enhancement to prevent the image from looking too smooth or artificial.", "default": 7, "min": 0, "max": 100, "display": "slider"}], "ultra_detail": ["INT", {"tooltip": "Controls fine detail, textures, and micro-details added during upscaling.", "default": 30, "min": 0, "max": 100, "display": "slider"}], "auto_downscale": ["BOOLEAN", {"tooltip": "Automatically downscale input image if output would exceed maximum resolution.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "scale_factor", "flavor", "sharpen", "smart_grain", "ultra_detail", "auto_downscale"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MagnificImageUpscalerPreciseV2Node", "display_name": "Magnific Image Upscale (Precise V2)", "description": "High-fidelity upscaling with fine control over sharpness, grain, and detail. 
Maximum output: 10060\u00d710060 pixels.", "python_module": "comfy_api_nodes.nodes_magnific", "category": "api node/image/Magnific", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "scale_factor", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $mins := {\"2x\": 0.172, \"4x\": 0.343, \"8x\": 0.515, \"16x\": 0.844};\n $maxs := {\"2x\": 2.045, \"4x\": 2.545, \"8x\": 2.889, \"16x\": 3.06};\n {\n \"type\": \"range_usd\",\n \"min_usd\": $lookup($mins, widgets.scale_factor),\n \"max_usd\": $lookup($maxs, widgets.scale_factor),\n \"format\": { \"approximate\": true }\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MagnificImageStyleTransferNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "The image to apply style transfer to."}], "reference_image": ["IMAGE", {"tooltip": "The reference image to extract style from."}], "prompt": ["STRING", {"default": "", "multiline": true}], "style_strength": ["INT", {"tooltip": "Percentage of style strength.", "default": 100, "min": 0, "max": 100, "display": "slider"}], "structure_strength": ["INT", {"tooltip": "Maintains the structure of the original image.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "flavor": ["COMBO", {"tooltip": "Style transfer flavor.", "multiselect": false, "options": ["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"]}], "engine": ["COMBO", {"tooltip": "Processing engine selection.", "advanced": true, "multiselect": false, "options": ["balanced", "definio", "illusio", "3d_cartoon", "colorful_anime", "caricature", "real", "super_real", "softy"]}], "portrait_mode": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Enable portrait mode for facial enhancements.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "enabled", "inputs": 
{"required": {"portrait_style": ["COMBO", {"tooltip": "Visual style applied to portrait images.", "multiselect": false, "options": ["standard", "pop", "super_pop"]}], "portrait_beautifier": ["COMBO", {"tooltip": "Facial beautification intensity on portraits.", "multiselect": false, "options": ["none", "beautify_face", "beautify_face_max"]}]}}}]}], "fixed_generation": ["BOOLEAN", {"tooltip": "When disabled, expect each generation to introduce a degree of randomness, leading to more diverse outcomes.", "advanced": true, "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "reference_image", "prompt", "style_strength", "structure_strength", "flavor", "engine", "portrait_mode", "fixed_generation"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MagnificImageStyleTransferNode", "display_name": "Magnific Image Style Transfer", "description": "Transfer the style from a reference image to your input image.", "python_module": "comfy_api_nodes.nodes_magnific", "category": "api node/image/Magnific", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.11}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MagnificImageRelightNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "The image to relight."}], "prompt": ["STRING", {"tooltip": "Descriptive guidance for lighting. 
Supports emphasis notation (1-1.4).", "default": "", "multiline": true}], "light_transfer_strength": ["INT", {"tooltip": "Intensity of light transfer application.", "default": 100, "min": 0, "max": 100, "display": "slider"}], "style": ["COMBO", {"tooltip": "Stylistic output preference.", "multiselect": false, "options": ["standard", "darker_but_realistic", "clean", "smooth", "brighter", "contrasted_n_hdr", "just_composition"]}], "interpolate_from_original": ["BOOLEAN", {"tooltip": "Restricts generation freedom to match original more closely.", "advanced": true, "default": false}], "change_background": ["BOOLEAN", {"tooltip": "Modifies background based on prompt/reference.", "advanced": true, "default": true}], "preserve_details": ["BOOLEAN", {"tooltip": "Maintains texture and fine details from original.", "advanced": true, "default": true}], "advanced_settings": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Fine-tuning options for advanced lighting control.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "enabled", "inputs": {"required": {"whites": ["INT", {"tooltip": "Adjusts the brightest tones in the image.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "blacks": ["INT", {"tooltip": "Adjusts the darkest tones in the image.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "brightness": ["INT", {"tooltip": "Overall brightness adjustment.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "contrast": ["INT", {"tooltip": "Contrast adjustment.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "saturation": ["INT", {"tooltip": "Color saturation adjustment.", "default": 50, "min": 0, "max": 100, "display": "slider"}], "engine": ["COMBO", {"tooltip": "Processing engine selection.", "multiselect": false, "options": ["automatic", "balanced", "cool", "real", "illusio", "fairy", "colorful_anime", "hard_transform", "softy"]}], "transfer_light_a": ["COMBO", {"tooltip": "The intensity of light transfer.", 
"multiselect": false, "options": ["automatic", "low", "medium", "normal", "high", "high_on_faces"]}], "transfer_light_b": ["COMBO", {"tooltip": "Also modifies light transfer intensity. Can be combined with the previous control for varied effects.", "multiselect": false, "options": ["automatic", "composition", "straight", "smooth_in", "smooth_out", "smooth_both", "reverse_both", "soft_in", "soft_out", "soft_mid", "style_shift", "strong_shift"]}], "fixed_generation": ["BOOLEAN", {"tooltip": "Ensures consistent output with the same settings.", "default": true}]}}}]}]}, "optional": {"reference_image": ["IMAGE", {"tooltip": "Optional reference image to transfer lighting from."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "light_transfer_strength", "style", "interpolate_from_original", "change_background", "preserve_details", "advanced_settings"], "optional": ["reference_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MagnificImageRelightNode", "display_name": "Magnific Image Relight", "description": "Relight an image with lighting adjustments and optional reference-based light transfer.", "python_module": "comfy_api_nodes.nodes_magnific", "category": "api node/image/Magnific", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.11}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MagnificImageSkinEnhancerNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "The portrait image to enhance."}], "sharpen": 
["INT", {"tooltip": "Sharpening intensity level.", "default": 0, "min": 0, "max": 100, "display": "slider"}], "smart_grain": ["INT", {"tooltip": "Smart grain intensity level.", "default": 2, "min": 0, "max": 100, "display": "slider"}], "mode": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Processing mode: creative for artistic enhancement, faithful for preserving original appearance, flexible for targeted optimization.", "options": [{"key": "creative", "inputs": {"required": {}}}, {"key": "faithful", "inputs": {"required": {"skin_detail": ["INT", {"tooltip": "Skin detail enhancement level.", "default": 80, "min": 0, "max": 100, "display": "slider"}]}}}, {"key": "flexible", "inputs": {"required": {"optimized_for": ["COMBO", {"tooltip": "Enhancement optimization target.", "multiselect": false, "options": ["enhance_skin", "improve_lighting", "enhance_everything", "transform_to_real", "no_make_up"]}]}}}]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "sharpen", "smart_grain", "mode"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "MagnificImageSkinEnhancerNode", "display_name": "Magnific Image Skin Enhancer", "description": "Skin enhancement for portraits with multiple processing modes.", "python_module": "comfy_api_nodes.nodes_magnific", "category": "api node/image/Magnific", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "mode", "type": "COMFY_DYNAMICCOMBO_V3"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $rates := {\"creative\": 0.29, \"faithful\": 0.37, \"flexible\": 0.45};\n {\"type\":\"usd\",\"usd\": $lookup($rates, 
widgets.mode)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MeshyTextToModelNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["latest"]}], "prompt": ["STRING", {"default": "", "multiline": true}], "style": ["COMBO", {"multiselect": false, "options": ["realistic", "sculpture"]}], "should_remesh": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "When set to false, returns an unprocessed triangular mesh.", "options": [{"key": "true", "inputs": {"required": {"topology": ["COMBO", {"multiselect": false, "options": ["triangle", "quad"]}], "target_polycount": ["INT", {"default": 300000, "min": 100, "max": 300000, "display": "number"}]}}}, {"key": "false", "inputs": {"required": {}}}]}], "symmetry_mode": ["COMBO", {"advanced": true, "multiselect": false, "options": ["auto", "on", "off"]}], "pose_mode": ["COMBO", {"tooltip": "Specify the pose mode for the generated model.", "advanced": true, "multiselect": false, "options": ["", "A-pose", "T-pose"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "prompt", "style", "should_remesh", "symmetry_mode", "pose_mode", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MESHY_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "meshy_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyTextToModelNode", "display_name": "Meshy: 
Text to Model", "description": "", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.8}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MeshyRefineNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["latest"]}], "meshy_task_id": ["MESHY_TASK_ID", {}], "enable_pbr": ["BOOLEAN", {"tooltip": "Generate PBR Maps (metallic, roughness, normal) in addition to the base color. Note: this should be set to false when using Sculpture style, as Sculpture style generates its own set of PBR maps.", "advanced": true, "default": false}], "texture_prompt": ["STRING", {"tooltip": "Provide a text prompt to guide the texturing process. Maximum 600 characters. Cannot be used at the same time as 'texture_image'.", "default": "", "multiline": true}]}, "optional": {"texture_image": ["IMAGE", {"tooltip": "Only one of 'texture_image' or 'texture_prompt' may be used at the same time."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "meshy_task_id", "enable_pbr", "texture_prompt"], "optional": ["texture_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MESHY_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "meshy_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyRefineNode", "display_name": "Meshy: Refine Draft Model", "description": 
"Refine a previously created draft model.", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MeshyImageToModelNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["latest"]}], "image": ["IMAGE", {}], "should_remesh": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "When set to false, returns an unprocessed triangular mesh.", "options": [{"key": "true", "inputs": {"required": {"topology": ["COMBO", {"multiselect": false, "options": ["triangle", "quad"]}], "target_polycount": ["INT", {"default": 300000, "min": 100, "max": 300000, "display": "number"}]}}}, {"key": "false", "inputs": {"required": {}}}]}], "symmetry_mode": ["COMBO", {"multiselect": false, "options": ["auto", "on", "off"]}], "should_texture": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Determines whether textures are generated. Setting it to false skips the texture phase and returns a mesh without textures.", "options": [{"key": "true", "inputs": {"required": {"enable_pbr": ["BOOLEAN", {"tooltip": "Generate PBR Maps (metallic, roughness, normal) in addition to the base color.", "default": false}], "texture_prompt": ["STRING", {"tooltip": "Provide a text prompt to guide the texturing process. Maximum 600 characters. 
Cannot be used at the same time as 'texture_image'.", "default": "", "multiline": true}]}, "optional": {"texture_image": ["IMAGE", {"tooltip": "Only one of 'texture_image' or 'texture_prompt' may be used at the same time."}]}}}, {"key": "false", "inputs": {"required": {}}}]}], "pose_mode": ["COMBO", {"tooltip": "Specify the pose mode for the generated model.", "advanced": true, "multiselect": false, "options": ["", "A-pose", "T-pose"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "image", "should_remesh", "symmetry_mode", "should_texture", "pose_mode", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MESHY_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "meshy_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyImageToModelNode", "display_name": "Meshy: Image to Model", "description": "", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "should_texture", "type": "COMFY_DYNAMICCOMBO_V3"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"true\": 1.2, \"false\": 0.8};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.should_texture)}\n )\n "}, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "MeshyMultiImageToModelNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["latest"]}], "images": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "image", "min": 2, "max": 4}}], "should_remesh": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "When set to false, returns an unprocessed triangular mesh.", "options": [{"key": "true", "inputs": {"required": {"topology": ["COMBO", {"multiselect": false, "options": ["triangle", "quad"]}], "target_polycount": ["INT", {"default": 300000, "min": 100, "max": 300000, "display": "number"}]}}}, {"key": "false", "inputs": {"required": {}}}]}], "symmetry_mode": ["COMBO", {"advanced": true, "multiselect": false, "options": ["auto", "on", "off"]}], "should_texture": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Determines whether textures are generated. Setting it to false skips the texture phase and returns a mesh without textures.", "options": [{"key": "true", "inputs": {"required": {"enable_pbr": ["BOOLEAN", {"tooltip": "Generate PBR Maps (metallic, roughness, normal) in addition to the base color.", "default": false}], "texture_prompt": ["STRING", {"tooltip": "Provide a text prompt to guide the texturing process. Maximum 600 characters. 
Cannot be used at the same time as 'texture_image'.", "default": "", "multiline": true}]}, "optional": {"texture_image": ["IMAGE", {"tooltip": "Only one of 'texture_image' or 'texture_prompt' may be used at the same time."}]}}}, {"key": "false", "inputs": {"required": {}}}]}], "pose_mode": ["COMBO", {"tooltip": "Specify the pose mode for the generated model.", "advanced": true, "multiselect": false, "options": ["", "A-pose", "T-pose"]}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "images", "should_remesh", "symmetry_mode", "should_texture", "pose_mode", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MESHY_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "meshy_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyMultiImageToModelNode", "display_name": "Meshy: Multi-Image to Model", "description": "", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "should_texture", "type": "COMFY_DYNAMICCOMBO_V3"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"true\": 0.6, \"false\": 0.2};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.should_texture)}\n )\n "}, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "MeshyRigModelNode": {"input": {"required": {"meshy_task_id": ["MESHY_TASK_ID", {}], "height_meters": ["FLOAT", {"tooltip": "The approximate height of the character model in meters. This aids in scaling and rigging accuracy.", "default": 1.7, "min": 0.1, "max": 15.0}]}, "optional": {"texture_image": ["IMAGE", {"tooltip": "The model's UV-unwrapped base color texture image."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["meshy_task_id", "height_meters"], "optional": ["texture_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MESHY_RIGGED_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "rig_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyRigModelNode", "display_name": "Meshy: Rig Model", "description": "Provides a rigged character in standard formats. 
Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, or humanoid assets with unclear limb and body structure.", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.2}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MeshyAnimateModelNode": {"input": {"required": {"rig_task_id": ["MESHY_RIGGED_TASK_ID", {}], "action_id": ["INT", {"tooltip": "Visit https://docs.meshy.ai/en/api/animation-library for a list of available values.", "default": 0, "min": 0, "max": 696}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["rig_task_id", "action_id"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false], "output_name": ["model_file", "GLB", "FBX"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "MeshyAnimateModelNode", "display_name": "Meshy: Animate Model", "description": "Apply a specific animation action to a previously rigged character.", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.12}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MeshyTextureNode": {"input": 
{"required": {"model": ["COMBO", {"multiselect": false, "options": ["latest"]}], "meshy_task_id": ["MESHY_TASK_ID", {}], "enable_original_uv": ["BOOLEAN", {"tooltip": "Use the original UV of the model instead of generating new UVs. When enabled, Meshy preserves existing textures from the uploaded model. If the model has no original UV, the quality of the output might not be as good.", "advanced": true, "default": true}], "pbr": ["BOOLEAN", {"advanced": true, "default": false}], "text_style_prompt": ["STRING", {"tooltip": "Describe your desired texture style of the object using text. Maximum 600 characters. Cannot be used at the same time as 'image_style'.", "default": "", "multiline": true}]}, "optional": {"image_style": ["IMAGE", {"tooltip": "A 2d image to guide the texturing process. Cannot be used at the same time as 'text_style_prompt'."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model", "meshy_task_id", "enable_original_uv", "pbr", "text_style_prompt"], "optional": ["image_style"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MODEL_TASK_ID", "FILE_3D_GLB", "FILE_3D_FBX"], "output_is_list": [false, false, false, false], "output_name": ["model_file", "meshy_task_id", "GLB", "FBX"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "MeshyTextureNode", "display_name": "Meshy: Texture Model", "description": "", "python_module": "comfy_api_nodes.nodes_meshy", "category": "api node/3d/Meshy", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, 
"search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MinimaxTextToVideoNode": {"input": {"required": {"prompt_text": ["STRING", {"tooltip": "Text prompt to guide the video generation", "default": "", "multiline": true}], "model": ["COMBO", {"tooltip": "Model to use for video generation", "default": "T2V-01", "multiselect": false, "options": ["T2V-01", "T2V-01-Director"]}]}, "optional": {"seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "step": 1, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt_text", "model"], "optional": ["seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "MinimaxTextToVideoNode", "display_name": "MiniMax Text to Video", "description": "Generates videos synchronously based on a prompt, and optional parameters.", "python_module": "comfy_api_nodes.nodes_minimax", "category": "api node/video/MiniMax", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.43}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MinimaxImageToVideoNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Image to use as first frame of video generation"}], "prompt_text": ["STRING", {"tooltip": "Text prompt to guide the video generation", "default": "", "multiline": true}], "model": ["COMBO", {"tooltip": "Model to use for video generation", "default": "I2V-01", "multiselect": false, 
"options": ["I2V-01-Director", "I2V-01", "I2V-01-live"]}]}, "optional": {"seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "step": 1, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt_text", "model"], "optional": ["seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "MinimaxImageToVideoNode", "display_name": "MiniMax Image to Video", "description": "Generates videos synchronously based on an image and prompt, and optional parameters.", "python_module": "comfy_api_nodes.nodes_minimax", "category": "api node/video/MiniMax", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.43}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MinimaxHailuoVideoNode": {"input": {"required": {"prompt_text": ["STRING", {"tooltip": "Text prompt to guide the video generation.", "default": "", "multiline": true}]}, "optional": {"seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 18446744073709551615, "step": 1, "control_after_generate": true}], "first_frame_image": ["IMAGE", {"tooltip": "Optional image to use as the first frame to generate a video."}], "prompt_optimizer": ["BOOLEAN", {"tooltip": "Optimize prompt to improve generation quality when needed.", "default": true}], "duration": ["COMBO", {"tooltip": "The length of the output video in seconds.", "default": 6, "multiselect": 
false, "options": [6, 10]}], "resolution": ["COMBO", {"tooltip": "The dimensions of the video display. 1080p is 1920x1080, 768p is 1366x768.", "default": "768P", "multiselect": false, "options": ["768P", "1080P"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt_text"], "optional": ["seed", "first_frame_image", "prompt_optimizer", "duration", "resolution"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "MinimaxHailuoVideoNode", "display_name": "MiniMax Hailuo Video", "description": "Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.", "python_module": "comfy_api_nodes.nodes_minimax", "category": "api node/video/MiniMax", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "resolution", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"768p\": {\"6\": 0.28, \"10\": 0.56},\n \"1080p\": {\"6\": 0.49}\n };\n $resPrices := $lookup($prices, $lowercase(widgets.resolution));\n $price := $lookup($resPrices, $string(widgets.duration));\n {\"type\":\"usd\",\"usd\": $price ? 
$price : 0.43}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MoonvalleyImg2VideoNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "The reference image used to generate the video"}], "prompt": ["STRING", {"multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative prompt text", "default": " gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring", "multiline": true}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video", "default": "16:9 (1920 x 1080)", "multiselect": false, "options": ["16:9 (1920 x 1080)", "9:16 (1080 x 1920)", "1:1 (1152 x 1152)", "4:3 (1536 x 1152)", "3:4 (1152 x 1536)"]}], "prompt_adherence": ["FLOAT", {"tooltip": "Guidance scale for generation control", "default": 4.5, "min": 1.0, "max": 20.0, "step": 1.0}], "seed": ["INT", {"tooltip": "Random seed value", "default": 9, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}], "steps": ["INT", {"tooltip": "Number of denoising steps", "default": 80, "min": 75, "max": 100, "step": 1}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "negative_prompt", "resolution", "prompt_adherence", "seed", "steps"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": 
"MoonvalleyImg2VideoNode", "display_name": "Moonvalley Marey Image to Video", "description": "Moonvalley Marey Image to Video Node", "python_module": "comfy_api_nodes.nodes_moonvalley", "category": "api node/video/Moonvalley Marey", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 1.5}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MoonvalleyTxt2VideoNode": {"input": {"required": {"prompt": ["STRING", {"multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative prompt text", "default": " gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring", "multiline": true}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video", "default": "16:9 (1920 x 1080)", "multiselect": false, "options": ["16:9 (1920 x 1080)", "9:16 (1080 x 1920)", "1:1 (1152 x 1152)", "4:3 (1536 x 1152)", "3:4 (1152 x 1536)", "21:9 (2560 x 1080)"]}], "prompt_adherence": ["FLOAT", {"tooltip": "Guidance scale for generation control", "default": 4.0, "min": 1.0, "max": 20.0, "step": 1.0}], "seed": ["INT", {"tooltip": "Random seed value", "default": 9, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}], "steps": ["INT", {"tooltip": "Inference steps", "default": 80, "min": 75, "max": 100, "step": 1}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], 
"unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "resolution", "prompt_adherence", "seed", "steps"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "MoonvalleyTxt2VideoNode", "display_name": "Moonvalley Marey Text to Video", "description": "", "python_module": "comfy_api_nodes.nodes_moonvalley", "category": "api node/video/Moonvalley Marey", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 1.5}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "MoonvalleyVideo2VideoNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Describes the video to generate", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative prompt text", "default": " gopro, bright, contrast, static, overexposed, vignette, artifacts, still, noise, texture, scanlines, videogame, 360 camera, VR, transition, flare, saturation, distorted, warped, wide angle, saturated, vibrant, glowing, cross dissolve, cheesy, ugly hands, mutated hands, mutant, disfigured, extra fingers, blown out, horrible, blurry, worst quality, bad, dissolve, melt, fade in, fade out, wobbly, weird, low quality, plastic, stock footage, video camera, boring", "multiline": true}], "seed": ["INT", {"tooltip": "Random seed value", "default": 9, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": false, "display": "number"}], "video": ["VIDEO", {"tooltip": "The reference video used to generate the output video. Must be at least 5 seconds long. Videos longer than 5s will be automatically trimmed. 
Only MP4 format supported."}], "steps": ["INT", {"tooltip": "Number of inference steps", "default": 60, "min": 60, "max": 100, "step": 1, "display": "number"}]}, "optional": {"control_type": ["COMBO", {"default": "Motion Transfer", "multiselect": false, "options": ["Motion Transfer", "Pose Transfer"]}], "motion_intensity": ["INT", {"tooltip": "Only used if control_type is 'Motion Transfer'", "default": 100, "min": 0, "max": 100, "step": 1}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "seed", "video", "steps"], "optional": ["control_type", "motion_intensity"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "MoonvalleyVideo2VideoNode", "display_name": "Moonvalley Marey Video to Video", "description": "", "python_module": "comfy_api_nodes.nodes_moonvalley", "category": "api node/video/Moonvalley Marey", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 2.25}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIDalle2": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for DALL\u00b7E", "default": "", "multiline": true}]}, "optional": {"seed": ["INT", {"tooltip": "not implemented yet in backend", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "size": ["COMBO", {"tooltip": "Image size", "default": "1024x1024", "multiselect": false, "options": ["256x256", "512x512", "1024x1024"]}], "n": ["INT", {"tooltip": "How many images to 
generate", "default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}], "image": ["IMAGE", {"tooltip": "Optional reference image for image editing."}], "mask": ["MASK", {"tooltip": "Optional mask for inpainting (white areas will be replaced)"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt"], "optional": ["seed", "size", "n", "image", "mask"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIDalle2", "display_name": "OpenAI DALL\u00b7E 2", "description": "Generates images synchronously via OpenAI's DALL\u00b7E 2 endpoint.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/image/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "size", "type": "COMBO"}, {"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $size := widgets.size;\n $nRaw := widgets.n;\n $n := ($nRaw != null and $nRaw != 0) ? $nRaw : 1;\n\n $base :=\n $contains($size, \"256x256\") ? 0.016 :\n $contains($size, \"512x512\") ? 
0.018 :\n 0.02;\n\n {\"type\":\"usd\",\"usd\": $round($base * $n, 3)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIDalle3": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for DALL\u00b7E", "default": "", "multiline": true}]}, "optional": {"seed": ["INT", {"tooltip": "not implemented yet in backend", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "quality": ["COMBO", {"tooltip": "Image quality", "default": "standard", "multiselect": false, "options": ["standard", "hd"]}], "style": ["COMBO", {"tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.", "default": "natural", "multiselect": false, "options": ["natural", "vivid"]}], "size": ["COMBO", {"tooltip": "Image size", "default": "1024x1024", "multiselect": false, "options": ["1024x1024", "1024x1792", "1792x1024"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt"], "optional": ["seed", "quality", "style", "size"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIDalle3", "display_name": "OpenAI DALL\u00b7E 3", "description": "Generates images synchronously via OpenAI's DALL\u00b7E 3 endpoint.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/image/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "size", "type": "COMBO"}, {"name": "quality", "type": "COMBO"}], "inputs": [], "input_groups": 
[]}, "expr": "\n (\n $size := widgets.size;\n $q := widgets.quality;\n $hd := $contains($q, \"hd\");\n\n $price :=\n $contains($size, \"1024x1024\")\n ? ($hd ? 0.08 : 0.04)\n : (($contains($size, \"1792x1024\") or $contains($size, \"1024x1792\"))\n ? ($hd ? 0.12 : 0.08)\n : 0.04);\n\n {\"type\":\"usd\",\"usd\": $price}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIGPTImage1": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for GPT Image", "default": "", "multiline": true}]}, "optional": {"seed": ["INT", {"tooltip": "not implemented yet in backend", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "quality": ["COMBO", {"tooltip": "Image quality, affects cost and generation time.", "default": "low", "multiselect": false, "options": ["low", "medium", "high"]}], "background": ["COMBO", {"tooltip": "Return image with or without background", "default": "auto", "multiselect": false, "options": ["auto", "opaque", "transparent"]}], "size": ["COMBO", {"tooltip": "Image size", "default": "auto", "multiselect": false, "options": ["auto", "1024x1024", "1024x1536", "1536x1024"]}], "n": ["INT", {"tooltip": "How many images to generate", "default": 1, "min": 1, "max": 8, "step": 1, "display": "number"}], "image": ["IMAGE", {"tooltip": "Optional reference image for image editing."}], "mask": ["MASK", {"tooltip": "Optional mask for inpainting (white areas will be replaced)"}], "model": ["COMBO", {"default": "gpt-image-1.5", "multiselect": false, "options": ["gpt-image-1", "gpt-image-1.5"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt"], "optional": ["seed", "quality", "background", "size", "n", "image", "mask", "model"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, 
"output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIGPTImage1", "display_name": "OpenAI GPT Image 1.5", "description": "Generates images synchronously via OpenAI's GPT Image endpoint.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/image/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "quality", "type": "COMBO"}, {"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $ranges := {\n \"low\": [0.011, 0.02],\n \"medium\": [0.046, 0.07],\n \"high\": [0.167, 0.3]\n };\n $range := $lookup($ranges, widgets.quality);\n $n := widgets.n;\n ($n = 1)\n ? {\"type\":\"range_usd\",\"min_usd\": $range[0], \"max_usd\": $range[1]}\n : {\n \"type\":\"range_usd\",\n \"min_usd\": $range[0],\n \"max_usd\": $range[1],\n \"format\": { \"suffix\": \" x \" & $string($n) & \"/Run\" }\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIChatNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text inputs to the model, used to generate a response.", "default": "", "multiline": true}], "persist_context": ["BOOLEAN", {"tooltip": "This parameter is deprecated and has no effect.", "advanced": true, "default": false}], "model": ["COMBO", {"tooltip": "The model used to generate the response", "multiselect": false, "options": ["o4-mini", "o1", "o3", "o1-pro", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-5", "gpt-5-mini", "gpt-5-nano"]}]}, "optional": {"images": ["IMAGE", {"tooltip": "Optional image(s) to use as context for the model. To include multiple images, you can use the Batch Images node."}], "files": ["OPENAI_INPUT_FILES", {"tooltip": "Optional file(s) to use as context for the model. 
Accepts inputs from the OpenAI Chat Input Files node."}], "advanced_options": ["OPENAI_CHAT_CONFIG", {"tooltip": "Optional configuration for the model. Accepts inputs from the OpenAI Chat Advanced Options node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "persist_context", "model"], "optional": ["images", "files", "advanced_options"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIChatNode", "display_name": "OpenAI ChatGPT", "description": "Generate text responses from an OpenAI model.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/text/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $contains($m, \"o4-mini\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.0011, 0.0044],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"o1-pro\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.15, 0.6],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"o1\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.015, 0.06],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"o3-mini\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.0011, 0.0044],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"o3\") ? 
{\n \"type\": \"list_usd\",\n \"usd\": [0.01, 0.04],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-4.1-nano\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.0001, 0.0004],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-4.1-mini\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.0004, 0.0016],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-4.1\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.002, 0.008],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-5-nano\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.00005, 0.0004],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-5-mini\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.00025, 0.002],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : $contains($m, \"gpt-5\") ? {\n \"type\": \"list_usd\",\n \"usd\": [0.00125, 0.01],\n \"format\": { \"approximate\": true, \"separator\": \"-\", \"suffix\": \" per 1K tokens\" }\n }\n : {\"type\": \"text\", \"text\": \"Token-based\"}\n )\n "}, "search_aliases": null, "essentials_category": "Text Generation", "has_intermediate_output": false}, "OpenAIInputFiles": {"input": {"required": {"file": ["COMBO", {"tooltip": "Input files to include as context for the model. Only accepts text (.txt) and PDF (.pdf) files for now.", "multiselect": false, "options": []}]}, "optional": {"OPENAI_INPUT_FILES": ["OPENAI_INPUT_FILES", {"tooltip": "An optional additional file(s) to batch together with the file loaded from this node. 
Allows chaining of input files so that a single message can include multiple input files."}]}}, "input_order": {"required": ["file"], "optional": ["OPENAI_INPUT_FILES"]}, "is_input_list": false, "output": ["OPENAI_INPUT_FILES"], "output_is_list": [false], "output_name": ["OPENAI_INPUT_FILES"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIInputFiles", "display_name": "OpenAI ChatGPT Input Files", "description": "Loads and prepares input files (text, pdf, etc.) to include as inputs for the OpenAI Chat Node. The files will be read by the OpenAI model when generating a response. \ud83d\udec8 TIP: Can be chained together with other OpenAI Input File nodes.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/text/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIChatConfig": {"input": {"required": {"truncation": ["COMBO", {"tooltip": "The truncation strategy to use for the model response. 
auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. disabled: If a model response will exceed the context window size for a model, the request will fail with a 400 error.", "advanced": true, "default": "auto", "multiselect": false, "options": ["auto", "disabled"]}]}, "optional": {"max_output_tokens": ["INT", {"tooltip": "An upper bound for the number of tokens that can be generated for a response, including visible output tokens", "advanced": true, "default": 4096, "min": 16, "max": 16384}], "instructions": ["STRING", {"tooltip": "Instructions for the model on how to generate the response", "multiline": true}]}}, "input_order": {"required": ["truncation"], "optional": ["max_output_tokens", "instructions"]}, "is_input_list": false, "output": ["OPENAI_CHAT_CONFIG"], "output_is_list": [false], "output_name": ["OPENAI_CHAT_CONFIG"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIChatConfig", "display_name": "OpenAI ChatGPT Advanced Options", "description": "Allows specifying advanced configuration options for the OpenAI Chat Nodes.", "python_module": "comfy_api_nodes.nodes_openai", "category": "api node/text/OpenAI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PixverseTextToVideoNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the video generation", "default": "", "multiline": true}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "4:3", "1:1", "3:4", "9:16"]}], "quality": ["COMBO", {"default": "540p", "multiselect": false, "options": ["360p", "540p", "720p", "1080p"]}], "duration_seconds": ["COMBO", {"multiselect": false, "options": [5, 8]}], "motion_mode": ["COMBO", 
{"multiselect": false, "options": ["normal", "fast"]}], "seed": ["INT", {"tooltip": "Seed for video generation.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "multiline": true}], "pixverse_template": ["PIXVERSE_TEMPLATE", {"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio", "quality", "duration_seconds", "motion_mode", "seed"], "optional": ["negative_prompt", "pixverse_template"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "PixverseTextToVideoNode", "display_name": "PixVerse Text to Video", "description": "Generates videos based on prompt and output_size.", "python_module": "comfy_api_nodes.nodes_pixverse", "category": "api node/video/PixVerse", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration_seconds", "type": "COMBO"}, {"name": "quality", "type": "COMBO"}, {"name": "motion_mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"5\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 0.6, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.45, \"fast\": 0.9},\n \"360p\": {\"normal\": 0.45, \"fast\": 0.9}\n },\n \"8\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.9, \"fast\": 1.2},\n \"360p\": {\"normal\": 
0.9, \"fast\": 1.2}\n }\n };\n $durPrices := $lookup($prices, $string(widgets.duration_seconds));\n $qualityPrices := $lookup($durPrices, widgets.quality);\n $price := $lookup($qualityPrices, widgets.motion_mode);\n {\"type\":\"usd\",\"usd\": $price ? $price : 0.9}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PixverseImageToVideoNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the video generation", "default": "", "multiline": true}], "quality": ["COMBO", {"default": "540p", "multiselect": false, "options": ["360p", "540p", "720p", "1080p"]}], "duration_seconds": ["COMBO", {"multiselect": false, "options": [5, 8]}], "motion_mode": ["COMBO", {"multiselect": false, "options": ["normal", "fast"]}], "seed": ["INT", {"tooltip": "Seed for video generation.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "multiline": true}], "pixverse_template": ["PIXVERSE_TEMPLATE", {"tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "quality", "duration_seconds", "motion_mode", "seed"], "optional": ["negative_prompt", "pixverse_template"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "PixverseImageToVideoNode", "display_name": "PixVerse Image to Video", "description": "Generates videos based on prompt and output_size.", "python_module": "comfy_api_nodes.nodes_pixverse", "category": "api 
node/video/PixVerse", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration_seconds", "type": "COMBO"}, {"name": "quality", "type": "COMBO"}, {"name": "motion_mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"5\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 0.6, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.45, \"fast\": 0.9},\n \"360p\": {\"normal\": 0.45, \"fast\": 0.9}\n },\n \"8\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.9, \"fast\": 1.2},\n \"360p\": {\"normal\": 0.9, \"fast\": 1.2}\n }\n };\n $durPrices := $lookup($prices, $string(widgets.duration_seconds));\n $qualityPrices := $lookup($durPrices, widgets.quality);\n $price := $lookup($qualityPrices, widgets.motion_mode);\n {\"type\":\"usd\",\"usd\": $price ? 
$price : 0.9}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PixverseTransitionVideoNode": {"input": {"required": {"first_frame": ["IMAGE", {}], "last_frame": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the video generation", "default": "", "multiline": true}], "quality": ["COMBO", {"default": "540p", "multiselect": false, "options": ["360p", "540p", "720p", "1080p"]}], "duration_seconds": ["COMBO", {"multiselect": false, "options": [5, 8]}], "motion_mode": ["COMBO", {"multiselect": false, "options": ["normal", "fast"]}], "seed": ["INT", {"tooltip": "Seed for video generation.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "multiline": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["first_frame", "last_frame", "prompt", "quality", "duration_seconds", "motion_mode", "seed"], "optional": ["negative_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "PixverseTransitionVideoNode", "display_name": "PixVerse Transition Video", "description": "Generates videos based on prompt and output_size.", "python_module": "comfy_api_nodes.nodes_pixverse", "category": "api node/video/PixVerse", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration_seconds", "type": "COMBO"}, {"name": "quality", "type": "COMBO"}, {"name": "motion_mode", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n 
$prices := {\n \"5\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 0.6, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.45, \"fast\": 0.9},\n \"360p\": {\"normal\": 0.45, \"fast\": 0.9}\n },\n \"8\": {\n \"1080p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"720p\": {\"normal\": 1.2, \"fast\": 1.2},\n \"540p\": {\"normal\": 0.9, \"fast\": 1.2},\n \"360p\": {\"normal\": 0.9, \"fast\": 1.2}\n }\n };\n $durPrices := $lookup($prices, $string(widgets.duration_seconds));\n $qualityPrices := $lookup($durPrices, widgets.quality);\n $price := $lookup($qualityPrices, widgets.motion_mode);\n {\"type\":\"usd\",\"usd\": $price ? $price : 0.9}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "PixverseTemplateNode": {"input": {"required": {"template": ["COMBO", {"multiselect": false, "options": ["Microwave", "Suit Swagger", "Anything, Robot", "Subject 3 Fever", "kiss kiss"]}]}}, "input_order": {"required": ["template"]}, "is_input_list": false, "output": ["PIXVERSE_TEMPLATE"], "output_is_list": [false], "output_name": ["pixverse_template"], "output_tooltips": [null], "output_matchtypes": null, "name": "PixverseTemplateNode", "display_name": "PixVerse Template", "description": "", "python_module": "comfy_api_nodes.nodes_pixverse", "category": "api node/video/PixVerse", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "QuiverTextToSVGNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the desired SVG output.", "default": "", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for SVG generation.", "options": [{"key": "arrow-preview", "inputs": {"required": {"temperature": ["FLOAT", {"tooltip": "Randomness control. 
Higher values increase randomness.", "advanced": true, "default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1, "display": "slider"}], "top_p": ["FLOAT", {"tooltip": "Nucleus sampling parameter.", "advanced": true, "default": 1.0, "min": 0.05, "max": 1.0, "step": 0.05, "display": "slider"}], "presence_penalty": ["FLOAT", {"tooltip": "Token presence penalty.", "advanced": true, "default": 0.0, "min": -2.0, "max": 2.0, "step": 0.1, "display": "slider"}]}}}]}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "optional": {"instructions": ["STRING", {"tooltip": "Additional style or formatting guidance.", "default": "", "multiline": true}], "reference_images": ["COMFY_AUTOGROW_V3", {"tooltip": "Up to 4 reference images to guide the generation.", "template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "ref_", "min": 0, "max": 4}}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "seed"], "optional": ["instructions", "reference_images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["SVG"], "output_is_list": [false], "output_name": ["SVG"], "output_tooltips": [null], "output_matchtypes": null, "name": "QuiverTextToSVGNode", "display_name": "Quiver Text to SVG", "description": "Generate an SVG from a text prompt using Quiver AI.", "python_module": "comfy_api_nodes.nodes_quiver", "category": "api node/image/Quiver", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.429}"}, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "QuiverImageToSVGNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Input image to vectorize."}], "auto_crop": ["BOOLEAN", {"tooltip": "Automatically crop to the dominant subject.", "default": false}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for SVG vectorization.", "options": [{"key": "arrow-preview", "inputs": {"required": {"target_size": ["INT", {"tooltip": "Square resize target in pixels.", "default": 1024, "min": 128, "max": 4096}], "temperature": ["FLOAT", {"tooltip": "Randomness control. Higher values increase randomness.", "advanced": true, "default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1, "display": "slider"}], "top_p": ["FLOAT", {"tooltip": "Nucleus sampling parameter.", "advanced": true, "default": 1.0, "min": 0.05, "max": 1.0, "step": 0.05, "display": "slider"}], "presence_penalty": ["FLOAT", {"tooltip": "Token presence penalty.", "advanced": true, "default": 0.0, "min": -2.0, "max": 2.0, "step": 0.1, "display": "slider"}]}}}]}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "auto_crop", "model", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["SVG"], "output_is_list": [false], "output_name": ["SVG"], "output_tooltips": [null], "output_matchtypes": null, "name": "QuiverImageToSVGNode", "display_name": "Quiver Image to SVG", "description": "Vectorize a raster image into SVG using Quiver AI.", "python_module": "comfy_api_nodes.nodes_quiver", "category": "api node/image/Quiver", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": 
{"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.429}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftTextToImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation.", "default": "", "multiline": true}], "size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "1024x1024", "multiselect": false, "options": ["1024x1024", "1365x1024", "1024x1365", "1536x1024", "1024x1536", "1820x1024", "1024x1820", "1024x2048", "2048x1024", "1434x1024", "1024x1434", "1024x1280", "1280x1024", "1024x1707", "1707x1024"]}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_style": ["RECRAFT_V3_STYLE", {}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "forceInput": true, "multiline": false}], "recraft_controls": ["RECRAFT_CONTROLS", {"tooltip": "Optional additional controls over the generation via the Recraft Controls node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "size", "n", "seed"], "optional": ["recraft_style", "negative_prompt", "recraft_controls"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftTextToImageNode", "display_name": "Recraft Text to Image", "description": "Generates images synchronously based on prompt 
and resolution.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": $round(0.04 * widgets.n, 2)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftImageToImageNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation.", "default": "", "multiline": true}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "strength": ["FLOAT", {"tooltip": "Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_style": ["RECRAFT_V3_STYLE", {}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "forceInput": true, "multiline": false}], "recraft_controls": ["RECRAFT_CONTROLS", {"tooltip": "Optional additional controls over the generation via the Recraft Controls node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "n", "strength", "seed"], "optional": ["recraft_style", "negative_prompt", "recraft_controls"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": 
[false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftImageToImageNode", "display_name": "Recraft Image to Image", "description": "Modify image based on prompt and strength.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": $round(0.04 * widgets.n, 2)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftImageInpaintingNode": {"input": {"required": {"image": ["IMAGE", {}], "mask": ["MASK", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation.", "default": "", "multiline": true}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_style": ["RECRAFT_V3_STYLE", {}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "forceInput": true, "multiline": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "mask", "prompt", "n", "seed"], "optional": ["recraft_style", "negative_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftImageInpaintingNode", "display_name": "Recraft Image 
Inpainting", "description": "Modify image based on prompt and mask.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": $round(0.04 * widgets.n, 2)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftTextToVectorNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation.", "default": "", "multiline": true}], "substyle": ["COMBO", {"multiselect": false, "options": ["None", "bold_stroke", "chemistry", "colored_stencil", "contour_pop_art", "cosmics", "cutout", "depressive", "editorial", "emotional_flat", "engraving", "infographical", "line_art", "line_circuit", "linocut", "marker_outline", "mosaic", "naivector", "roundish_flat", "seamless", "segmented_colors", "sharp_contrast", "thin", "vector_photo", "vivid_shapes"]}], "size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "1024x1024", "multiselect": false, "options": ["1024x1024", "1365x1024", "1024x1365", "1536x1024", "1024x1536", "1820x1024", "1024x1820", "1024x2048", "2048x1024", "1434x1024", "1024x1434", "1024x1280", "1280x1024", "1024x1707", "1707x1024"]}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "default": "", "forceInput": true, "multiline": false}], "recraft_controls": ["RECRAFT_CONTROLS", {"tooltip": "Optional 
additional controls over the generation via the Recraft Controls node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "substyle", "size", "n", "seed"], "optional": ["negative_prompt", "recraft_controls"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["SVG"], "output_is_list": [false], "output_name": ["SVG"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftTextToVectorNode", "display_name": "Recraft Text to Vector", "description": "Generates SVG synchronously based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": $round(0.08 * widgets.n, 2)}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftVectorizeImageNode": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["SVG"], "output_is_list": [false], "output_name": ["SVG"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftVectorizeImageNode", "display_name": "Recraft Vectorize Image", "description": "Generates SVG synchronously from an input image.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, 
"price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.01}"}, "search_aliases": null, "essentials_category": "Image Tools", "has_intermediate_output": false}, "RecraftRemoveBackgroundNode": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "RecraftRemoveBackgroundNode", "display_name": "Recraft Remove Background", "description": "Remove background from image, and return processed image and mask.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.01}"}, "search_aliases": null, "essentials_category": "Image Tools", "has_intermediate_output": false}, "RecraftReplaceBackgroundNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt for the image generation.", "default": "", "multiline": true}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_style": ["RECRAFT_V3_STYLE", {}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired 
elements on an image.", "default": "", "forceInput": true, "multiline": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "n", "seed"], "optional": ["recraft_style", "negative_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftReplaceBackgroundNode", "display_name": "Recraft Replace Background", "description": "Replace background on image, based on provided prompt.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.04}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftCrispUpscaleNode": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftCrispUpscaleNode", "display_name": "Recraft Crisp Upscale Image", "description": "Upscale image synchronously.\nEnhances a given raster image using \u2018crisp upscale\u2019 tool, increasing image resolution, making the image sharper and cleaner.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": 
false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.004}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftCreativeUpscaleNode": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftCreativeUpscaleNode", "display_name": "Recraft Creative Upscale Image", "description": "Upscale image synchronously.\nEnhances a given raster image using \u2018creative upscale\u2019 tool, boosting resolution with a focus on refining small details and faces.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.25}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftStyleV3RealisticImage": {"input": {"required": {"substyle": ["COMBO", {"multiselect": false, "options": ["None", "b_and_w", "enterprise", "evening_light", "faded_nostalgia", "forest_life", "hard_flash", "hdr", "motion_blur", "mystic_naturalism", "natural_light", "natural_tones", "organic_calm", "real_life_glow", "retro_realism", "retro_snapshot", "studio_portrait", "urban_drama", "village_realism", "warm_folk"]}]}}, "input_order": {"required": ["substyle"]}, "is_input_list": false, "output": 
["RECRAFT_V3_STYLE"], "output_is_list": [false], "output_name": ["recraft_style"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftStyleV3RealisticImage", "display_name": "Recraft Style - Realistic Image", "description": "Select realistic_image style and optional substyle.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftStyleV3DigitalIllustration": {"input": {"required": {"substyle": ["COMBO", {"multiselect": false, "options": ["None", "2d_art_poster", "2d_art_poster_2", "antiquarian", "bold_fantasy", "child_book", "child_books", "cover", "crosshatch", "digital_engraving", "engraving_color", "expressionism", "freehand_details", "grain", "grain_20", "graphic_intensity", "hand_drawn", "hand_drawn_outline", "handmade_3d", "hard_comics", "infantile_sketch", "long_shadow", "modern_folk", "multicolor", "neon_calm", "noir", "nostalgic_pastel", "outline_details", "pastel_gradient", "pastel_sketch", "pixel_art", "plastic", "pop_art", "pop_renaissance", "seamless", "street_art", "tablet_sketch", "urban_glow", "urban_sketching", "vanilla_dreams", "young_adult_book", "young_adult_book_2"]}]}}, "input_order": {"required": ["substyle"]}, "is_input_list": false, "output": ["RECRAFT_V3_STYLE"], "output_is_list": [false], "output_name": ["recraft_style"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftStyleV3DigitalIllustration", "display_name": "Recraft Style - Digital Illustration", "description": "Select digital_illustration style and optional substyle.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": 
null, "essentials_category": null, "has_intermediate_output": false}, "RecraftStyleV3LogoRaster": {"input": {"required": {"substyle": ["COMBO", {"multiselect": false, "options": ["emblem_graffiti", "emblem_pop_art", "emblem_punk", "emblem_stamp", "emblem_vintage"]}]}}, "input_order": {"required": ["substyle"]}, "is_input_list": false, "output": ["RECRAFT_V3_STYLE"], "output_is_list": [false], "output_name": ["recraft_style"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftStyleV3LogoRaster", "display_name": "Recraft Style - Logo Raster", "description": "Select logo_raster style and optional substyle.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftStyleV3InfiniteStyleLibrary": {"input": {"required": {"style_id": ["STRING", {"tooltip": "UUID of style from Infinite Style Library.", "default": "", "multiline": false}]}}, "input_order": {"required": ["style_id"]}, "is_input_list": false, "output": ["RECRAFT_V3_STYLE"], "output_is_list": [false], "output_name": ["recraft_style"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftStyleV3InfiniteStyleLibrary", "display_name": "Recraft Style - Infinite Style Library", "description": "Choose style based on preexisting UUID from Recraft's Infinite Style Library.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftCreateStyleNode": {"input": {"required": {"style": ["COMBO", {"tooltip": "The base style of the generated images.", "multiselect": false, "options": 
["realistic_image", "digital_illustration"]}], "images": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "image", "min": 1, "max": 5}}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["style", "images"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["style_id"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftCreateStyleNode", "display_name": "Recraft Create Style", "description": "Create a custom style from reference images. Upload 1-5 images to use as style references. Total size of all images is limited to 5 MB.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.04}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftColorRGB": {"input": {"required": {"r": ["INT", {"tooltip": "Red value of color.", "default": 0, "min": 0, "max": 255}], "g": ["INT", {"tooltip": "Green value of color.", "default": 0, "min": 0, "max": 255}], "b": ["INT", {"tooltip": "Blue value of color.", "default": 0, "min": 0, "max": 255}]}, "optional": {"recraft_color": ["RECRAFT_COLOR", {}]}}, "input_order": {"required": ["r", "g", "b"], "optional": ["recraft_color"]}, "is_input_list": false, "output": ["RECRAFT_COLOR"], "output_is_list": [false], "output_name": ["recraft_color"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftColorRGB", "display_name": "Recraft Color RGB", "description": "Create Recraft Color by choosing specific RGB values.", 
"python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftControls": {"input": {"required": {}, "optional": {"colors": ["RECRAFT_COLOR", {}], "background_color": ["RECRAFT_COLOR", {}]}}, "input_order": {"required": [], "optional": ["colors", "background_color"]}, "is_input_list": false, "output": ["RECRAFT_CONTROLS"], "output_is_list": [false], "output_name": ["recraft_controls"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftControls", "display_name": "Recraft Controls", "description": "Create Recraft Controls for customizing Recraft generation.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftV4TextToImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation. 
Maximum 10,000 characters.", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "The model to use for generation.", "options": [{"key": "recraftv4", "inputs": {"required": {"size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "1024x1024", "multiselect": false, "options": ["1024x1024", "1536x768", "768x1536", "1280x832", "832x1280", "1216x896", "896x1216", "1152x896", "896x1152", "832x1344", "1280x896", "896x1280", "1344x768", "768x1344"]}]}}}, {"key": "recraftv4_pro", "inputs": {"required": {"size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "2048x2048", "multiselect": false, "options": ["2048x2048", "3072x1536", "1536x3072", "2560x1664", "1664x2560", "2432x1792", "1792x2432", "2304x1792", "1792x2304", "1664x2688", "1434x1024", "1024x1434", "2560x1792", "1792x2560"]}]}}}]}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_controls": ["RECRAFT_CONTROLS", {"tooltip": "Optional additional controls over the generation via the Recraft Controls node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "model", "n", "seed"], "optional": ["recraft_controls"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftV4TextToImageNode", "display_name": "Recraft V4 Text to 
Image", "description": "Generates images using Recraft V4 or V4 Pro models.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"recraftv4\": 0.04, \"recraftv4_pro\": 0.25};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.model) * widgets.n}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RecraftV4TextToVectorNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Prompt for the image generation. Maximum 10,000 characters.", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "An optional text description of undesired elements on an image.", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "The model to use for generation.", "options": [{"key": "recraftv4", "inputs": {"required": {"size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "1024x1024", "multiselect": false, "options": ["1024x1024", "1536x768", "768x1536", "1280x832", "832x1280", "1216x896", "896x1216", "1152x896", "896x1152", "832x1344", "1280x896", "896x1280", "1344x768", "768x1344"]}]}}}, {"key": "recraftv4_pro", "inputs": {"required": {"size": ["COMBO", {"tooltip": "The size of the generated image.", "default": "2048x2048", "multiselect": false, "options": ["2048x2048", "3072x1536", "1536x3072", "2560x1664", "1664x2560", "2432x1792", "1792x2432", "2304x1792", "1792x2304", "1664x2688", "1434x1024", "1024x1434", "2560x1792", "1792x2560"]}]}}}]}], "n": ["INT", {"tooltip": "The number of images to generate.", "default": 1, "min": 1, "max": 6}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are 
nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}]}, "optional": {"recraft_controls": ["RECRAFT_CONTROLS", {"tooltip": "Optional additional controls over the generation via the Recraft Controls node."}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "model", "n", "seed"], "optional": ["recraft_controls"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["SVG"], "output_is_list": [false], "output_name": ["SVG"], "output_tooltips": [null], "output_matchtypes": null, "name": "RecraftV4TextToVectorNode", "display_name": "Recraft V4 Text to Vector", "description": "Generates SVG using Recraft V4 or V4 Pro models.", "python_module": "comfy_api_nodes.nodes_recraft", "category": "api node/image/Recraft", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "n", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"recraftv4\": 0.08, \"recraftv4_pro\": 0.30};\n {\"type\":\"usd\",\"usd\": $lookup($prices, widgets.model) * widgets.n}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReveImageCreateNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the desired image. 
Maximum 2560 characters.", "default": "", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model version to use for generation.", "options": [{"key": "reve-create@20250915", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output image.", "multiselect": false, "options": ["3:2", "16:9", "9:16", "2:3", "4:3", "3:4", "1:1"]}], "test_time_scaling": ["INT", {"tooltip": "Higher values produce better images but cost more credits.", "advanced": true, "default": 1, "min": 1, "max": 5, "step": 1}]}}}]}], "upscale": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Upscale the generated image. May add additional cost.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "enabled", "inputs": {"required": {"upscale_factor": ["INT", {"tooltip": "Upscale factor (2x, 3x, or 4x).", "default": 2, "min": 2, "max": 4, "step": 1}]}}}]}], "remove_background": ["BOOLEAN", {"tooltip": "Remove the background from the generated image. May add additional cost.", "default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "upscale", "remove_background", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReveImageCreateNode", "display_name": "Reve Image Create", "description": "Generate images from text descriptions using Reve.", "python_module": "comfy_api_nodes.nodes_reve", "category": "api node/image/Reve", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": 
true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "upscale", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "upscale.upscale_factor", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $factor := $lookup(widgets, \"upscale.upscale_factor\");\n $fmt := {\"approximate\": true, \"note\": \"(base)\"};\n widgets.upscale = \"enabled\" ? (\n $factor = 4 ? {\"type\": \"usd\", \"usd\": 0.0762, \"format\": $fmt}\n : $factor = 3 ? {\"type\": \"usd\", \"usd\": 0.0591, \"format\": $fmt}\n : {\"type\": \"usd\", \"usd\": 0.0457, \"format\": $fmt}\n ) : {\"type\": \"usd\", \"usd\": 0.03432, \"format\": $fmt}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReveImageEditNode": {"input": {"required": {"image": ["IMAGE", {"tooltip": "The image to edit."}], "edit_instruction": ["STRING", {"tooltip": "Text description of how to edit the image. Maximum 2560 characters.", "default": "", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model version to use for editing.", "options": [{"key": "reve-edit@20250915", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output image.", "multiselect": false, "options": ["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"]}], "test_time_scaling": ["INT", {"tooltip": "Higher values produce better images but cost more credits.", "advanced": true, "default": 1, "min": 1, "max": 5, "step": 1}]}}}, {"key": "reve-edit-fast@20251030", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output image.", "multiselect": false, "options": ["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"]}], "test_time_scaling": ["INT", {"tooltip": "Higher values produce better images but cost more credits.", "advanced": true, "default": 1, "min": 1, "max": 5, "step": 1}]}}}]}], "upscale": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Upscale the generated image. 
May add additional cost.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "enabled", "inputs": {"required": {"upscale_factor": ["INT", {"tooltip": "Upscale factor (2x, 3x, or 4x).", "default": 2, "min": 2, "max": 4, "step": 1}]}}}]}], "remove_background": ["BOOLEAN", {"tooltip": "Remove the background from the generated image. May add additional cost.", "default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "edit_instruction", "model", "upscale", "remove_background", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReveImageEditNode", "display_name": "Reve Image Edit", "description": "Edit images using natural language instructions with Reve.", "python_module": "comfy_api_nodes.nodes_reve", "category": "api node/image/Reve", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "upscale", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "upscale.upscale_factor", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $fmt := {\"approximate\": true, \"note\": \"(base)\"};\n $isFast := $contains(widgets.model, \"fast\");\n $enabled := widgets.upscale = \"enabled\";\n $factor := $lookup(widgets, \"upscale.upscale_factor\");\n $isFast\n ? {\"type\": \"usd\", \"usd\": 0.01001, \"format\": $fmt}\n : $enabled ? (\n $factor = 4 ? 
{\"type\": \"usd\", \"usd\": 0.0991, \"format\": $fmt}\n : $factor = 3 ? {\"type\": \"usd\", \"usd\": 0.0819, \"format\": $fmt}\n : {\"type\": \"usd\", \"usd\": 0.0686, \"format\": $fmt}\n ) : {\"type\": \"usd\", \"usd\": 0.0572, \"format\": $fmt}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ReveImageRemixNode": {"input": {"required": {"reference_images": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"image": ["IMAGE", {}]}}, "prefix": "image_", "min": 1, "max": 6}}], "prompt": ["STRING", {"tooltip": "Text description of the desired image. May include XML img tags to reference specific images by index, e.g. 0, 1, etc.", "default": "", "multiline": true}], "model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model version to use for remixing.", "options": [{"key": "reve-remix@20250915", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output image.", "multiselect": false, "options": ["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"]}], "test_time_scaling": ["INT", {"tooltip": "Higher values produce better images but cost more credits.", "advanced": true, "default": 1, "min": 1, "max": 5, "step": 1}]}}}, {"key": "reve-remix-fast@20251030", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output image.", "multiselect": false, "options": ["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"]}], "test_time_scaling": ["INT", {"tooltip": "Higher values produce better images but cost more credits.", "advanced": true, "default": 1, "min": 1, "max": 5, "step": 1}]}}}]}], "upscale": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Upscale the generated image. 
May add additional cost.", "options": [{"key": "disabled", "inputs": {"required": {}}}, {"key": "enabled", "inputs": {"required": {"upscale_factor": ["INT", {"tooltip": "Upscale factor (2x, 3x, or 4x).", "default": 2, "min": 2, "max": 4, "step": 1}]}}}]}], "remove_background": ["BOOLEAN", {"tooltip": "Remove the background from the generated image. May add additional cost.", "default": false}], "seed": ["INT", {"tooltip": "Seed controls whether the node should re-run; results are non-deterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "control_after_generate": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["reference_images", "prompt", "model", "upscale", "remove_background", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "ReveImageRemixNode", "display_name": "Reve Image Remix", "description": "Combine reference images with text prompts to create new images using Reve.", "python_module": "comfy_api_nodes.nodes_reve", "category": "api node/image/Reve", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "upscale", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "upscale.upscale_factor", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $fmt := {\"approximate\": true, \"note\": \"(base)\"};\n $isFast := $contains(widgets.model, \"fast\");\n $enabled := widgets.upscale = \"enabled\";\n $factor := $lookup(widgets, \"upscale.upscale_factor\");\n $isFast\n ? {\"type\": \"usd\", \"usd\": 0.01001, \"format\": $fmt}\n : $enabled ? (\n $factor = 4 ? 
{\"type\": \"usd\", \"usd\": 0.0991, \"format\": $fmt}\n : $factor = 3 ? {\"type\": \"usd\", \"usd\": 0.0819, \"format\": $fmt}\n : {\"type\": \"usd\", \"usd\": 0.0686, \"format\": $fmt}\n ) : {\"type\": \"usd\", \"usd\": 0.0572, \"format\": $fmt}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Rodin3D_Regular": {"input": {"required": {"Images": ["IMAGE", {}]}, "optional": {"Seed": ["INT", {"default": 0, "min": 0, "max": 65535, "display": "number"}], "Material_Type": ["COMBO", {"default": "PBR", "multiselect": false, "options": ["PBR", "Shaded"]}], "Polygon_count": ["COMBO", {"default": "18K-Quad", "multiselect": false, "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["Images"], "optional": ["Seed", "Material_Type", "Polygon_count"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB"], "output_is_list": [false, false], "output_name": ["3D Model Path", "GLB"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Rodin3D_Regular", "display_name": "Rodin 3D Generate - Regular Generate", "description": "Generate 3D Assets using Rodin API", "python_module": "comfy_api_nodes.nodes_rodin", "category": "api node/3d/Rodin", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Rodin3D_Detail": {"input": {"required": {"Images": ["IMAGE", {}]}, "optional": {"Seed": ["INT", {"default": 0, "min": 0, "max": 65535, "display": "number"}], "Material_Type": ["COMBO", 
{"default": "PBR", "multiselect": false, "options": ["PBR", "Shaded"]}], "Polygon_count": ["COMBO", {"default": "18K-Quad", "multiselect": false, "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["Images"], "optional": ["Seed", "Material_Type", "Polygon_count"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB"], "output_is_list": [false, false], "output_name": ["3D Model Path", "GLB"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Rodin3D_Detail", "display_name": "Rodin 3D Generate - Detail Generate", "description": "Generate 3D Assets using Rodin API", "python_module": "comfy_api_nodes.nodes_rodin", "category": "api node/3d/Rodin", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Rodin3D_Smooth": {"input": {"required": {"Images": ["IMAGE", {}]}, "optional": {"Seed": ["INT", {"default": 0, "min": 0, "max": 65535, "display": "number"}], "Material_Type": ["COMBO", {"default": "PBR", "multiselect": false, "options": ["PBR", "Shaded"]}], "Polygon_count": ["COMBO", {"default": "18K-Quad", "multiselect": false, "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["Images"], "optional": ["Seed", "Material_Type", "Polygon_count"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, 
"is_input_list": false, "output": ["STRING", "FILE_3D_GLB"], "output_is_list": [false, false], "output_name": ["3D Model Path", "GLB"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Rodin3D_Smooth", "display_name": "Rodin 3D Generate - Smooth Generate", "description": "Generate 3D Assets using Rodin API", "python_module": "comfy_api_nodes.nodes_rodin", "category": "api node/3d/Rodin", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Rodin3D_Sketch": {"input": {"required": {"Images": ["IMAGE", {}]}, "optional": {"Seed": ["INT", {"default": 0, "min": 0, "max": 65535, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["Images"], "optional": ["Seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB"], "output_is_list": [false, false], "output_name": ["3D Model Path", "GLB"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Rodin3D_Sketch", "display_name": "Rodin 3D Generate - Sketch Generate", "description": "Generate 3D Assets using Rodin API", "python_module": "comfy_api_nodes.nodes_rodin", "category": "api node/3d/Rodin", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Rodin3D_Gen2": {"input": {"required": {"Images": ["IMAGE", {}], "TAPose": 
["BOOLEAN", {"advanced": true, "default": false}]}, "optional": {"Seed": ["INT", {"default": 0, "min": 0, "max": 65535, "display": "number"}], "Material_Type": ["COMBO", {"default": "PBR", "multiselect": false, "options": ["PBR", "Shaded"]}], "Polygon_count": ["COMBO", {"default": "500K-Triangle", "multiselect": false, "options": ["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["Images", "TAPose"], "optional": ["Seed", "Material_Type", "Polygon_count"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["STRING", "FILE_3D_GLB"], "output_is_list": [false, false], "output_name": ["3D Model Path", "GLB"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "Rodin3D_Gen2", "display_name": "Rodin 3D Generate - Gen-2 Generate", "description": "Generate 3D Assets using Rodin API", "python_module": "comfy_api_nodes.nodes_rodin", "category": "api node/3d/Rodin", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RunwayFirstLastFrameNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for the generation", "default": "", "multiline": true}], "start_frame": ["IMAGE", {"tooltip": "Start frame to be used for the video"}], "end_frame": ["IMAGE", {"tooltip": "End frame to be used for the video. 
Supported for gen3a_turbo only."}], "duration": ["COMBO", {"multiselect": false, "options": [5, 10]}], "ratio": ["COMBO", {"multiselect": false, "options": ["768:1280", "1280:768"]}], "seed": ["INT", {"tooltip": "Random seed for generation", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "start_frame", "end_frame", "duration", "ratio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "RunwayFirstLastFrameNode", "display_name": "Runway First-Last-Frame to Video", "description": "Upload first and last keyframes, draft a prompt, and generate a video. More complex transitions, such as cases where the Last frame is completely different from the First frame, may benefit from the longer 10s duration. This would give the generation more time to smoothly transition between the two inputs. 
Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.", "python_module": "comfy_api_nodes.nodes_runway", "category": "api node/video/Runway", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.0715 * widgets.duration}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RunwayImageToVideoNodeGen3a": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for the generation", "default": "", "multiline": true}], "start_frame": ["IMAGE", {"tooltip": "Start frame to be used for the video"}], "duration": ["COMBO", {"multiselect": false, "options": [5, 10]}], "ratio": ["COMBO", {"multiselect": false, "options": ["768:1280", "1280:768"]}], "seed": ["INT", {"tooltip": "Random seed for generation", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "start_frame", "duration", "ratio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "RunwayImageToVideoNodeGen3a", "display_name": "Runway Image to Video (Gen3a Turbo)", "description": "Generate a video from a single starting frame using Gen3a Turbo model. 
Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.", "python_module": "comfy_api_nodes.nodes_runway", "category": "api node/video/Runway", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.0715 * widgets.duration}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RunwayImageToVideoNodeGen4": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for the generation", "default": "", "multiline": true}], "start_frame": ["IMAGE", {"tooltip": "Start frame to be used for the video"}], "duration": ["COMBO", {"multiselect": false, "options": [5, 10]}], "ratio": ["COMBO", {"multiselect": false, "options": ["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"]}], "seed": ["INT", {"tooltip": "Random seed for generation", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "start_frame", "duration", "ratio", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "RunwayImageToVideoNodeGen4", "display_name": "Runway Image to Video (Gen4 Turbo)", "description": "Generate a video from a single starting frame using Gen4 Turbo model. 
Before diving in, review these best practices to ensure that your input selections will set your generation up for success: https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.", "python_module": "comfy_api_nodes.nodes_runway", "category": "api node/video/Runway", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.0715 * widgets.duration}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "RunwayTextToImageNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text prompt for the generation", "default": "", "multiline": true}], "ratio": ["COMBO", {"multiselect": false, "options": ["1920:1080", "1080:1920", "1024:1024", "1360:768", "1080:1080", "1168:880", "1440:1080", "1080:1440", "1808:768", "2112:912"]}]}, "optional": {"reference_image": ["IMAGE", {"tooltip": "Optional reference image to guide the generation"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "ratio"], "optional": ["reference_image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "RunwayTextToImageNode", "display_name": "Runway Text to Image", "description": "Generate an image from a text prompt using Runway's Gen 4 model. 
You can also include reference image to guide the generation.", "python_module": "comfy_api_nodes.nodes_runway", "category": "api node/image/Runway", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.11}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "OpenAIVideoSora2": {"input": {"required": {"model": ["COMBO", {"default": "sora-2", "multiselect": false, "options": ["sora-2", "sora-2-pro"]}], "prompt": ["STRING", {"tooltip": "Guiding text; may be empty if an input image is present.", "default": "", "multiline": true}], "size": ["COMBO", {"default": "1280x720", "multiselect": false, "options": ["720x1280", "1280x720", "1024x1792", "1792x1024"]}], "duration": ["COMBO", {"default": 8, "multiselect": false, "options": [4, 8, 12]}]}, "optional": {"image": ["IMAGE", {}], "seed": ["INT", {"tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "size", "duration"], "optional": ["image", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "OpenAIVideoSora2", "display_name": "OpenAI Sora - Video", "description": "OpenAI video and audio generation.", "python_module": "comfy_api_nodes.nodes_sora", "category": "api node/video/Sora", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, 
"api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "size", "type": "COMBO"}, {"name": "duration", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $size := widgets.size;\n $dur := widgets.duration;\n $isPro := $contains($m, \"sora-2-pro\");\n $isSora2 := $contains($m, \"sora-2\");\n $isProSize := ($size = \"1024x1792\" or $size = \"1792x1024\");\n $perSec :=\n $isPro ? ($isProSize ? 0.5 : 0.3) :\n $isSora2 ? 0.1 :\n ($isProSize ? 0.5 : 0.1);\n {\"type\":\"usd\",\"usd\": $round($perSec * $dur, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityStableImageUltraNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "What you wish to see in the output image. A strong, descriptive prompt that clearly defineselements, colors, and subjects will lead to better results. To control the weight of a given word use the format `(word:weight)`,where `word` is the word you'd like to control the weight of and `weight`is a value between 0 and 1. 
For example: `The sky was a crisp (blue:0.3) and (green:0.8)` would convey a sky that was blue and green, but more green than blue.", "default": "", "multiline": true}], "aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of generated image.", "default": "1:1", "multiselect": false, "options": ["1:1", "16:9", "9:16", "3:2", "2:3", "5:4", "4:5", "21:9", "9:21"]}], "style_preset": ["COMBO", {"tooltip": "Optional desired style of generated image.", "advanced": true, "multiselect": false, "options": ["None", "3d-model", "analog-film", "anime", "cinematic", "comic-book", "digital-art", "enhance", "fantasy-art", "isometric", "line-art", "low-poly", "modeling-compound", "neon-punk", "origami", "photographic", "pixel-art", "tile-texture"]}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"image": ["IMAGE", {}], "negative_prompt": ["STRING", {"tooltip": "A blurb of text describing what you do not wish to see in the output image. 
This is an advanced feature.", "advanced": true, "default": "", "forceInput": true, "multiline": false}], "image_denoise": ["FLOAT", {"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio", "style_preset", "seed"], "optional": ["image", "negative_prompt", "image_denoise"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityStableImageUltraNode", "display_name": "Stability AI Stable Image Ultra", "description": "Generates images synchronously based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/image/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.08}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityStableImageSD_3_5Node": {"input": {"required": {"prompt": ["STRING", {"tooltip": "What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", "default": "", "multiline": true}], "model": ["COMBO", {"multiselect": false, "options": ["sd3.5-large", "sd3.5-medium"]}], "aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of generated image.", "default": "1:1", "multiselect": false, "options": ["1:1", "16:9", "9:16", "3:2", "2:3", "5:4", "4:5", "21:9", "9:21"]}], "style_preset": ["COMBO", {"tooltip": "Optional desired style of generated image.", "advanced": true, "multiselect": false, "options": ["None", "3d-model", "analog-film", "anime", "cinematic", "comic-book", "digital-art", "enhance", "fantasy-art", "isometric", "line-art", "low-poly", "modeling-compound", "neon-punk", "origami", "photographic", "pixel-art", "tile-texture"]}], "cfg_scale": ["FLOAT", {"tooltip": "How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)", "default": 4.0, "min": 1.0, "max": 10.0, "step": 0.1}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"image": ["IMAGE", {}], "negative_prompt": ["STRING", {"tooltip": "Keywords of what you do not wish to see in the output image. 
This is an advanced feature.", "advanced": true, "default": "", "forceInput": true, "multiline": false}], "image_denoise": ["FLOAT", {"tooltip": "Denoise of input image; 0.0 yields image identical to input, 1.0 is as if no image was provided at all.", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "model", "aspect_ratio", "style_preset", "cfg_scale", "seed"], "optional": ["image", "negative_prompt", "image_denoise"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityStableImageSD_3_5Node", "display_name": "Stability AI Stable Diffusion 3.5 Image", "description": "Generates images synchronously based on prompt and resolution.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/image/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $contains(widgets.model,\"large\")\n ? {\"type\":\"usd\",\"usd\":0.065}\n : {\"type\":\"usd\",\"usd\":0.035}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityUpscaleConservativeNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", "default": "", "multiline": true}], "creativity": ["FLOAT", {"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.", "default": 0.35, "min": 0.2, "max": 0.5, "step": 0.01}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Keywords of what you do not wish to see in the output image. This is an advanced feature.", "advanced": true, "default": "", "forceInput": true, "multiline": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "creativity", "seed"], "optional": ["negative_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityUpscaleConservativeNode", "display_name": "Stability AI Upscale Conservative", "description": "Upscale image with minimal alterations to 4K resolution.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/image/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.25}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityUpscaleCreativeNode": {"input": {"required": {"image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "What you wish to see in the output image. 
A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.", "default": "", "multiline": true}], "creativity": ["FLOAT", {"tooltip": "Controls the likelihood of creating additional details not heavily conditioned by the init image.", "default": 0.3, "min": 0.1, "max": 0.5, "step": 0.01}], "style_preset": ["COMBO", {"tooltip": "Optional desired style of generated image.", "advanced": true, "multiselect": false, "options": ["None", "3d-model", "analog-film", "anime", "cinematic", "comic-book", "digital-art", "enhance", "fantasy-art", "isometric", "line-art", "low-poly", "modeling-compound", "neon-punk", "origami", "photographic", "pixel-art", "tile-texture"]}], "seed": ["INT", {"tooltip": "The random seed used for creating the noise.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Keywords of what you do not wish to see in the output image. 
This is an advanced feature.", "advanced": true, "default": "", "forceInput": true, "multiline": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image", "prompt", "creativity", "style_preset", "seed"], "optional": ["negative_prompt"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityUpscaleCreativeNode", "display_name": "Stability AI Upscale Creative", "description": "Upscale image with minimal alterations to 4K resolution.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/image/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.25}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityUpscaleFastNode": {"input": {"required": {"image": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["image"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityUpscaleFastNode", "display_name": "Stability AI Upscale Fast", "description": "Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/image/Stability AI", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.01}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "StabilityTextToAudio": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["stable-audio-2.5"]}], "prompt": ["STRING", {"default": "", "multiline": true}]}, "optional": {"duration": ["INT", {"tooltip": "Controls the duration in seconds of the generated audio.", "default": 190, "min": 1, "max": 190, "step": 1}], "seed": ["INT", {"tooltip": "The random seed used for generation.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}], "steps": ["INT", {"tooltip": "Controls the number of sampling steps.", "advanced": true, "default": 8, "min": 4, "max": 8, "step": 1}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt"], "optional": ["duration", "seed", "steps"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityTextToAudio", "display_name": "Stability AI Text To Audio", "description": "Generates high-quality music and sound effects from text descriptions.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/audio/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.2}"}, "search_aliases": null, "essentials_category": "Audio", 
"has_intermediate_output": false}, "StabilityAudioToAudio": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["stable-audio-2.5"]}], "prompt": ["STRING", {"default": "", "multiline": true}], "audio": ["AUDIO", {"tooltip": "Audio must be between 6 and 190 seconds long."}]}, "optional": {"duration": ["INT", {"tooltip": "Controls the duration in seconds of the generated audio.", "default": 190, "min": 1, "max": 190, "step": 1}], "seed": ["INT", {"tooltip": "The random seed used for generation.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}], "steps": ["INT", {"tooltip": "Controls the number of sampling steps.", "advanced": true, "default": 8, "min": 4, "max": 8, "step": 1}], "strength": ["FLOAT", {"tooltip": "Parameter controls how much influence the audio parameter has on the generated audio.", "default": 1, "min": 0.01, "max": 1.0, "step": 0.01, "display": "slider"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "audio"], "optional": ["duration", "seed", "steps", "strength"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityAudioToAudio", "display_name": "Stability AI Audio To Audio", "description": "Transforms existing audio samples into new high-quality compositions using text instructions.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/audio/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.2}"}, "search_aliases": 
null, "essentials_category": null, "has_intermediate_output": false}, "StabilityAudioInpaint": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["stable-audio-2.5"]}], "prompt": ["STRING", {"default": "", "multiline": true}], "audio": ["AUDIO", {"tooltip": "Audio must be between 6 and 190 seconds long."}]}, "optional": {"duration": ["INT", {"tooltip": "Controls the duration in seconds of the generated audio.", "default": 190, "min": 1, "max": 190, "step": 1}], "seed": ["INT", {"tooltip": "The random seed used for generation.", "default": 0, "min": 0, "max": 4294967294, "step": 1, "control_after_generate": true, "display": "number"}], "steps": ["INT", {"tooltip": "Controls the number of sampling steps.", "advanced": true, "default": 8, "min": 4, "max": 8, "step": 1}], "mask_start": ["INT", {"advanced": true, "default": 30, "min": 0, "max": 190, "step": 1}], "mask_end": ["INT", {"advanced": true, "default": 190, "min": 0, "max": 190, "step": 1}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "audio"], "optional": ["duration", "seed", "steps", "mask_start", "mask_end"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "output_tooltips": [null], "output_matchtypes": null, "name": "StabilityAudioInpaint", "display_name": "Stability AI Audio Inpaint", "description": "Transforms part of existing audio sample using text instructions.", "python_module": "comfy_api_nodes.nodes_stability", "category": "api node/audio/Stability AI", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.2}"}, "search_aliases": 
null, "essentials_category": null, "has_intermediate_output": false}, "TopazImageEnhance": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["Reimagine"]}], "image": ["IMAGE", {}]}, "optional": {"prompt": ["STRING", {"tooltip": "Optional text prompt for creative upscaling guidance.", "default": "", "multiline": true}], "subject_detection": ["COMBO", {"advanced": true, "multiselect": false, "options": ["All", "Foreground", "Background"]}], "face_enhancement": ["BOOLEAN", {"tooltip": "Enhance faces (if present) during processing.", "advanced": true, "default": true}], "face_enhancement_creativity": ["FLOAT", {"tooltip": "Set the creativity level for face enhancement.", "advanced": true, "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "display": "number"}], "face_enhancement_strength": ["FLOAT", {"tooltip": "Controls how sharp enhanced faces are relative to the background.", "advanced": true, "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "display": "number"}], "crop_to_fill": ["BOOLEAN", {"tooltip": "By default, the image is letterboxed when the output aspect ratio differs. 
Enable to crop the image to fill the output dimensions.", "advanced": true, "default": false}], "output_width": ["INT", {"tooltip": "Zero value means to calculate automatically (usually it will be original size or output_height if specified).", "advanced": true, "default": 0, "min": 0, "max": 32000, "step": 1, "display": "number"}], "output_height": ["INT", {"tooltip": "Zero value means to output in the same height as original or output width.", "advanced": true, "default": 0, "min": 0, "max": 32000, "step": 1, "display": "number"}], "creativity": ["INT", {"default": 3, "min": 1, "max": 9, "step": 1, "display": "slider"}], "face_preservation": ["BOOLEAN", {"tooltip": "Preserve subjects' facial identity.", "advanced": true, "default": true}], "color_preservation": ["BOOLEAN", {"tooltip": "Preserve the original colors.", "advanced": true, "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image"], "optional": ["prompt", "subject_detection", "face_enhancement", "face_enhancement_creativity", "face_enhancement_strength", "crop_to_fill", "output_width", "output_height", "creativity", "face_preservation", "color_preservation"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "TopazImageEnhance", "display_name": "Topaz Image Enhance", "description": "Industry-standard upscaling and image enhancement.", "python_module": "comfy_api_nodes.nodes_topaz", "category": "api node/image/Topaz", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TopazVideoEnhance": {"input": {"required": {"video": 
["VIDEO", {}], "upscaler_enabled": ["BOOLEAN", {"default": true}], "upscaler_model": ["COMBO", {"multiselect": false, "options": ["Starlight (Astra) Fast", "Starlight (Astra) Creative", "Starlight Precise 2.5"]}], "upscaler_resolution": ["COMBO", {"multiselect": false, "options": ["FullHD (1080p)", "4K (2160p)"]}]}, "optional": {"upscaler_creativity": ["COMBO", {"tooltip": "Creativity level (applies only to Starlight (Astra) Creative).", "advanced": true, "default": "low", "multiselect": false, "options": ["low", "middle", "high"]}], "interpolation_enabled": ["BOOLEAN", {"default": false}], "interpolation_model": ["COMBO", {"advanced": true, "default": "apo-8", "multiselect": false, "options": ["apo-8"]}], "interpolation_slowmo": ["INT", {"tooltip": "Slow-motion factor applied to the input video. For example, 2 makes the output twice as slow and doubles the duration.", "advanced": true, "default": 1, "min": 1, "max": 16, "display": "number"}], "interpolation_frame_rate": ["INT", {"tooltip": "Output frame rate.", "default": 60, "min": 15, "max": 240, "display": "number"}], "interpolation_duplicate": ["BOOLEAN", {"tooltip": "Analyze the input for duplicate frames and remove them.", "advanced": true, "default": false}], "interpolation_duplicate_threshold": ["FLOAT", {"tooltip": "Detection sensitivity for duplicate frames.", "advanced": true, "default": 0.01, "min": 0.001, "max": 0.1, "step": 0.001, "display": "number"}], "dynamic_compression_level": ["COMBO", {"tooltip": "CQP level.", "advanced": true, "default": "Low", "multiselect": false, "options": ["Low", "Mid", "High"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["video", "upscaler_enabled", "upscaler_model", "upscaler_resolution"], "optional": ["upscaler_creativity", "interpolation_enabled", "interpolation_model", "interpolation_slowmo", "interpolation_frame_rate", 
"interpolation_duplicate", "interpolation_duplicate_threshold", "dynamic_compression_level"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "TopazVideoEnhance", "display_name": "Topaz Video Enhance", "description": "Breathe new life into video with powerful upscaling and recovery technology.", "python_module": "comfy_api_nodes.nodes_topaz", "category": "api node/video/Topaz", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoTextToModelNode": {"input": {"required": {"prompt": ["STRING", {"multiline": true}]}, "optional": {"negative_prompt": ["STRING", {"multiline": true}], "model_version": ["COMBO", {"default": "v2.5-20250123", "multiselect": false, "options": ["v3.0-20250812", "v2.5-20250123", "v2.0-20240919", "v1.4-20240625"]}], "style": ["COMBO", {"default": "None", "multiselect": false, "options": ["person:person2cartoon", "animal:venom", "object:clay", "object:steampunk", "object:christmas", "object:barbie", "gold", "ancient_bronze", "None"]}], "texture": ["BOOLEAN", {"default": true}], "pbr": ["BOOLEAN", {"default": true}], "image_seed": ["INT", {"advanced": true, "default": 42}], "model_seed": ["INT", {"advanced": true, "default": 42}], "texture_seed": ["INT", {"advanced": true, "default": 42}], "texture_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}], "face_limit": ["INT", {"advanced": true, "default": -1, "min": -1, "max": 2000000}], "quad": ["BOOLEAN", {"advanced": true, "default": false}], "geometry_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}]}, "hidden": 
{"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["prompt"], "optional": ["negative_prompt", "model_version", "style", "texture", "pbr", "image_seed", "model_seed", "texture_seed", "texture_quality", "face_limit", "quad", "geometry_quality"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MODEL_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "model task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoTextToModelNode", "display_name": "Tripo: Text to Model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model_version", "type": "COMBO"}, {"name": "style", "type": "COMBO"}, {"name": "texture", "type": "BOOLEAN"}, {"name": "pbr", "type": "BOOLEAN"}, {"name": "quad", "type": "BOOLEAN"}, {"name": "texture_quality", "type": "COMBO"}, {"name": "geometry_quality", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $isV14 := $contains(widgets.model_version,\"v1.4\");\n $style := widgets.style;\n $hasStyle := ($style != \"\" and $style != \"none\");\n $withTexture := widgets.texture or widgets.pbr;\n $isHdTexture := (widgets.texture_quality = \"detailed\");\n $isDetailedGeometry := (widgets.geometry_quality = \"detailed\");\n $baseCredits :=\n $isV14 ? 20 : ($withTexture ? 20 : 10);\n $credits :=\n $baseCredits\n + ($hasStyle ? 5 : 0)\n + (widgets.quad ? 5 : 0)\n + ($isHdTexture ? 10 : 0)\n + ($isDetailedGeometry ? 
20 : 0);\n {\"type\":\"usd\",\"usd\": $round($credits * 0.01, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoImageToModelNode": {"input": {"required": {"image": ["IMAGE", {}]}, "optional": {"model_version": ["COMBO", {"tooltip": "The model version to use for generation", "multiselect": false, "options": ["v3.0-20250812", "v2.5-20250123", "v2.0-20240919", "v1.4-20240625"]}], "style": ["COMBO", {"default": "None", "multiselect": false, "options": ["person:person2cartoon", "animal:venom", "object:clay", "object:steampunk", "object:christmas", "object:barbie", "gold", "ancient_bronze", "None"]}], "texture": ["BOOLEAN", {"default": true}], "pbr": ["BOOLEAN", {"default": true}], "model_seed": ["INT", {"advanced": true, "default": 42}], "orientation": ["COMBO", {"advanced": true, "default": "default", "multiselect": false, "options": ["align_image", "default"]}], "texture_seed": ["INT", {"advanced": true, "default": 42}], "texture_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}], "texture_alignment": ["COMBO", {"advanced": true, "default": "original_image", "multiselect": false, "options": ["original_image", "geometry"]}], "face_limit": ["INT", {"advanced": true, "default": -1, "min": -1, "max": 500000}], "quad": ["BOOLEAN", {"advanced": true, "default": false}], "geometry_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["image"], "optional": ["model_version", "style", "texture", "pbr", "model_seed", "orientation", "texture_seed", "texture_quality", "texture_alignment", "face_limit", "quad", "geometry_quality"], "hidden": ["auth_token_comfy_org", 
"api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MODEL_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "model task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoImageToModelNode", "display_name": "Tripo: Image to Model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model_version", "type": "COMBO"}, {"name": "style", "type": "COMBO"}, {"name": "texture", "type": "BOOLEAN"}, {"name": "pbr", "type": "BOOLEAN"}, {"name": "quad", "type": "BOOLEAN"}, {"name": "texture_quality", "type": "COMBO"}, {"name": "geometry_quality", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $isV14 := $contains(widgets.model_version,\"v1.4\");\n $style := widgets.style;\n $hasStyle := ($style != \"\" and $style != \"none\");\n $withTexture := widgets.texture or widgets.pbr;\n $isHdTexture := (widgets.texture_quality = \"detailed\");\n $isDetailedGeometry := (widgets.geometry_quality = \"detailed\");\n $baseCredits :=\n $isV14 ? 30 : ($withTexture ? 30 : 20);\n $credits :=\n $baseCredits\n + ($hasStyle ? 5 : 0)\n + (widgets.quad ? 5 : 0)\n + ($isHdTexture ? 10 : 0)\n + ($isDetailedGeometry ? 
20 : 0);\n {\"type\":\"usd\",\"usd\": $round($credits * 0.01, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoMultiviewToModelNode": {"input": {"required": {"image": ["IMAGE", {}]}, "optional": {"image_left": ["IMAGE", {}], "image_back": ["IMAGE", {}], "image_right": ["IMAGE", {}], "model_version": ["COMBO", {"tooltip": "The model version to use for generation", "multiselect": false, "options": ["v3.0-20250812", "v2.5-20250123", "v2.0-20240919", "v1.4-20240625"]}], "orientation": ["COMBO", {"advanced": true, "default": "default", "multiselect": false, "options": ["align_image", "default"]}], "texture": ["BOOLEAN", {"default": true}], "pbr": ["BOOLEAN", {"default": true}], "model_seed": ["INT", {"advanced": true, "default": 42}], "texture_seed": ["INT", {"advanced": true, "default": 42}], "texture_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}], "texture_alignment": ["COMBO", {"advanced": true, "default": "original_image", "multiselect": false, "options": ["original_image", "geometry"]}], "face_limit": ["INT", {"advanced": true, "default": -1, "min": -1, "max": 500000}], "quad": ["BOOLEAN", {"advanced": true, "default": false}], "geometry_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["image"], "optional": ["image_left", "image_back", "image_right", "model_version", "orientation", "texture", "pbr", "model_seed", "texture_seed", "texture_quality", "texture_alignment", "face_limit", "quad", "geometry_quality"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": 
["STRING", "MODEL_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "model task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoMultiviewToModelNode", "display_name": "Tripo: Multiview to Model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model_version", "type": "COMBO"}, {"name": "texture", "type": "BOOLEAN"}, {"name": "pbr", "type": "BOOLEAN"}, {"name": "quad", "type": "BOOLEAN"}, {"name": "texture_quality", "type": "COMBO"}, {"name": "geometry_quality", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $isV14 := $contains(widgets.model_version,\"v1.4\");\n $withTexture := widgets.texture or widgets.pbr;\n $isHdTexture := (widgets.texture_quality = \"detailed\");\n $isDetailedGeometry := (widgets.geometry_quality = \"detailed\");\n $baseCredits :=\n $isV14 ? 30 : ($withTexture ? 30 : 20);\n $credits :=\n $baseCredits\n + (widgets.quad ? 5 : 0)\n + ($isHdTexture ? 10 : 0)\n + ($isDetailedGeometry ? 
20 : 0);\n {\"type\":\"usd\",\"usd\": $round($credits * 0.01, 2)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoTextureNode": {"input": {"required": {"model_task_id": ["MODEL_TASK_ID", {}]}, "optional": {"texture": ["BOOLEAN", {"default": true}], "pbr": ["BOOLEAN", {"default": true}], "texture_seed": ["INT", {"advanced": true, "default": 42}], "texture_quality": ["COMBO", {"advanced": true, "default": "standard", "multiselect": false, "options": ["standard", "detailed"]}], "texture_alignment": ["COMBO", {"advanced": true, "default": "original_image", "multiselect": false, "options": ["original_image", "geometry"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model_task_id"], "optional": ["texture", "pbr", "texture_seed", "texture_quality", "texture_alignment"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MODEL_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "model task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoTextureNode", "display_name": "Tripo: Texture model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "texture_quality", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $tq := widgets.texture_quality;\n {\"type\":\"usd\",\"usd\": ($contains($tq,\"detailed\") ? 
0.2 : 0.1)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoRefineNode": {"input": {"required": {"model_task_id": ["MODEL_TASK_ID", {"tooltip": "Must be a v1.4 Tripo model"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["model_task_id"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "MODEL_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "model task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoRefineNode", "display_name": "Tripo: Refine Draft model", "description": "Refine a draft model created by v1.4 Tripo models only.", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.3}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoRigNode": {"input": {"required": {"original_model_task_id": ["MODEL_TASK_ID", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["original_model_task_id"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "RIG_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "rig task_id", "GLB"], "output_tooltips": 
[null, null, null], "output_matchtypes": null, "name": "TripoRigNode", "display_name": "Tripo: Rig model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.25}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoRetargetNode": {"input": {"required": {"original_model_task_id": ["RIG_TASK_ID", {}], "animation": ["COMBO", {"multiselect": false, "options": ["preset:idle", "preset:walk", "preset:run", "preset:dive", "preset:climb", "preset:jump", "preset:slash", "preset:shoot", "preset:hurt", "preset:fall", "preset:turn", "preset:quadruped:walk", "preset:hexapod:walk", "preset:octopod:walk", "preset:serpentine:march", "preset:aquatic:march"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["original_model_task_id", "animation"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "RETARGET_TASK_ID", "FILE_3D_GLB"], "output_is_list": [false, false, false], "output_name": ["model_file", "retarget task_id", "GLB"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "TripoRetargetNode", "display_name": "Tripo: Retarget rigged model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": 
"{\"type\":\"usd\",\"usd\":0.1}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "TripoConversionNode": {"input": {"required": {"original_model_task_id": ["MODEL_TASK_ID,RIG_TASK_ID,RETARGET_TASK_ID", {}], "format": ["COMBO", {"multiselect": false, "options": ["GLTF", "USDZ", "FBX", "OBJ", "STL", "3MF"]}]}, "optional": {"quad": ["BOOLEAN", {"advanced": true, "default": false}], "face_limit": ["INT", {"advanced": true, "default": -1, "min": -1, "max": 2000000}], "texture_size": ["INT", {"advanced": true, "default": 4096, "min": 128, "max": 4096}], "texture_format": ["COMBO", {"advanced": true, "default": "JPEG", "multiselect": false, "options": ["BMP", "DPX", "HDR", "JPEG", "OPEN_EXR", "PNG", "TARGA", "TIFF", "WEBP"]}], "force_symmetry": ["BOOLEAN", {"advanced": true, "default": false}], "flatten_bottom": ["BOOLEAN", {"advanced": true, "default": false}], "flatten_bottom_threshold": ["FLOAT", {"advanced": true, "default": 0.0, "min": 0.0, "max": 1.0}], "pivot_to_center_bottom": ["BOOLEAN", {"advanced": true, "default": false}], "scale_factor": ["FLOAT", {"advanced": true, "default": 1.0, "min": 0.0}], "with_animation": ["BOOLEAN", {"advanced": true, "default": false}], "pack_uv": ["BOOLEAN", {"advanced": true, "default": false}], "bake": ["BOOLEAN", {"advanced": true, "default": false}], "part_names": ["STRING", {"advanced": true, "default": "", "multiline": false}], "fbx_preset": ["COMBO", {"advanced": true, "default": "blender", "multiselect": false, "options": ["blender", "mixamo", "3dsmax"]}], "export_vertex_colors": ["BOOLEAN", {"advanced": true, "default": false}], "export_orientation": ["COMBO", {"advanced": true, "default": "default", "multiselect": false, "options": ["align_image", "default"]}], "animate_in_place": ["BOOLEAN", {"advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"], "prompt": 
["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["original_model_task_id", "format"], "optional": ["quad", "face_limit", "texture_size", "texture_format", "force_symmetry", "flatten_bottom", "flatten_bottom_threshold", "pivot_to_center_bottom", "scale_factor", "with_animation", "pack_uv", "bake", "part_names", "fbx_preset", "export_vertex_colors", "export_orientation", "animate_in_place"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "TripoConversionNode", "display_name": "Tripo: Convert model", "description": "", "python_module": "comfy_api_nodes.nodes_tripo", "category": "api node/3d/Tripo", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "quad", "type": "BOOLEAN"}, {"name": "face_limit", "type": "INT"}, {"name": "texture_size", "type": "INT"}, {"name": "texture_format", "type": "COMBO"}, {"name": "force_symmetry", "type": "BOOLEAN"}, {"name": "flatten_bottom", "type": "BOOLEAN"}, {"name": "flatten_bottom_threshold", "type": "FLOAT"}, {"name": "pivot_to_center_bottom", "type": "BOOLEAN"}, {"name": "scale_factor", "type": "FLOAT"}, {"name": "with_animation", "type": "BOOLEAN"}, {"name": "pack_uv", "type": "BOOLEAN"}, {"name": "bake", "type": "BOOLEAN"}, {"name": "part_names", "type": "STRING"}, {"name": "fbx_preset", "type": "COMBO"}, {"name": "export_vertex_colors", "type": "BOOLEAN"}, {"name": "export_orientation", "type": "COMBO"}, {"name": "animate_in_place", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $face := (widgets.face_limit != null) ? widgets.face_limit : -1;\n $texSize := (widgets.texture_size != null) ? 
widgets.texture_size : 4096;\n $flatThresh := (widgets.flatten_bottom_threshold != null) ? widgets.flatten_bottom_threshold : 0;\n $scale := (widgets.scale_factor != null) ? widgets.scale_factor : 1;\n $texFmt := (widgets.texture_format != \"\" ? widgets.texture_format : \"jpeg\");\n $part := widgets.part_names;\n $fbx := (widgets.fbx_preset != \"\" ? widgets.fbx_preset : \"blender\");\n $orient := (widgets.export_orientation != \"\" ? widgets.export_orientation : \"default\");\n $advanced :=\n widgets.quad or\n widgets.force_symmetry or\n widgets.flatten_bottom or\n widgets.pivot_to_center_bottom or\n widgets.with_animation or\n widgets.pack_uv or\n widgets.bake or\n widgets.export_vertex_colors or\n widgets.animate_in_place or\n ($face != -1) or\n ($texSize != 4096) or\n ($flatThresh != 0) or\n ($scale != 1) or\n ($texFmt != \"jpeg\") or\n ($part != \"\") or\n ($fbx != \"blender\") or\n ($orient != \"default\");\n {\"type\":\"usd\",\"usd\": ($advanced ? 0.1 : 0.05)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "VeoVideoGenerationNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the video", "default": "", "multiline": true}], "aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output video", "default": "16:9", "multiselect": false, "options": ["16:9", "9:16"]}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative text prompt to guide what to avoid in the video", "default": "", "multiline": true}], "duration_seconds": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 5, "min": 5, "max": 8, "step": 1, "display": "number"}], "enhance_prompt": ["BOOLEAN", {"tooltip": "Whether to enhance the prompt with AI assistance", "advanced": true, "default": true}], "person_generation": ["COMBO", {"tooltip": "Whether to allow generating people in the video", "advanced": true, "default": "ALLOW", "multiselect": false, "options": ["ALLOW", "BLOCK"]}], 
"seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}], "image": ["IMAGE", {"tooltip": "Optional reference image to guide video generation"}], "model": ["COMBO", {"tooltip": "Veo 2 model to use for video generation", "default": "veo-2.0-generate-001", "multiselect": false, "options": ["veo-2.0-generate-001"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio"], "optional": ["negative_prompt", "duration_seconds", "enhance_prompt", "person_generation", "seed", "image", "model"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "VeoVideoGenerationNode", "display_name": "Google Veo 2 Video Generation", "description": "Generates videos from text prompts using Google's Veo 2 API", "python_module": "comfy_api_nodes.nodes_veo2", "category": "api node/video/Veo", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration_seconds", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\": 0.5 * widgets.duration_seconds}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Veo3VideoGenerationNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the video", "default": "", "multiline": true}], "aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output video", "default": "16:9", "multiselect": false, "options": ["16:9", "9:16"]}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative text prompt to 
guide what to avoid in the video", "default": "", "multiline": true}], "duration_seconds": ["INT", {"tooltip": "Duration of the output video in seconds (Veo 3 only supports 8 seconds)", "default": 8, "min": 8, "max": 8, "step": 1, "display": "number"}], "enhance_prompt": ["BOOLEAN", {"tooltip": "This parameter is deprecated and ignored.", "advanced": true, "default": true}], "person_generation": ["COMBO", {"tooltip": "Whether to allow generating people in the video", "advanced": true, "default": "ALLOW", "multiselect": false, "options": ["ALLOW", "BLOCK"]}], "seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}], "image": ["IMAGE", {"tooltip": "Optional reference image to guide video generation"}], "model": ["COMBO", {"tooltip": "Veo 3 model to use for video generation", "default": "veo-3.0-generate-001", "multiselect": false, "options": ["veo-3.1-generate", "veo-3.1-fast-generate", "veo-3.0-generate-001", "veo-3.0-fast-generate-001"]}], "generate_audio": ["BOOLEAN", {"tooltip": "Generate audio for the video. 
Supported by all Veo 3 models.", "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "aspect_ratio"], "optional": ["negative_prompt", "duration_seconds", "enhance_prompt", "person_generation", "seed", "image", "model", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Veo3VideoGenerationNode", "display_name": "Google Veo 3 Video Generation", "description": "Generates videos from text prompts using Google's Veo 3 API", "python_module": "comfy_api_nodes.nodes_veo2", "category": "api node/video/Veo", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $a := widgets.generate_audio;\n ($contains($m,\"veo-3.0-fast-generate-001\") or $contains($m,\"veo-3.1-fast-generate\"))\n ? {\"type\":\"usd\",\"usd\": ($a ? 1.2 : 0.8)}\n : ($contains($m,\"veo-3.0-generate-001\") or $contains($m,\"veo-3.1-generate\"))\n ? {\"type\":\"usd\",\"usd\": ($a ? 
3.2 : 1.6)}\n : {\"type\":\"range_usd\",\"min_usd\":0.8,\"max_usd\":3.2}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Veo3FirstLastFrameNode": {"input": {"required": {"prompt": ["STRING", {"tooltip": "Text description of the video", "default": "", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative text prompt to guide what to avoid in the video", "default": "", "multiline": true}], "resolution": ["COMBO", {"multiselect": false, "options": ["720p", "1080p"]}], "aspect_ratio": ["COMBO", {"tooltip": "Aspect ratio of the output video", "default": "16:9", "multiselect": false, "options": ["16:9", "9:16"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 8, "min": 4, "max": 8, "step": 2, "display": "slider"}], "seed": ["INT", {"tooltip": "Seed for video generation", "default": 0, "min": 0, "max": 4294967295, "step": 1, "control_after_generate": true, "display": "number"}], "first_frame": ["IMAGE", {"tooltip": "Start frame"}], "last_frame": ["IMAGE", {"tooltip": "End frame"}], "model": ["COMBO", {"default": "veo-3.1-fast-generate", "multiselect": false, "options": ["veo-3.1-generate", "veo-3.1-fast-generate"]}], "generate_audio": ["BOOLEAN", {"tooltip": "Generate audio for the video.", "default": true}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "negative_prompt", "resolution", "aspect_ratio", "duration", "seed", "first_frame", "last_frame", "model", "generate_audio"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Veo3FirstLastFrameNode", "display_name": "Google Veo 3 First-Last-Frame to Video", "description": "Generate video using prompt and 
first and last frames.", "python_module": "comfy_api_nodes.nodes_veo2", "category": "api node/video/Veo", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "generate_audio", "type": "BOOLEAN"}, {"name": "duration", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\n \"veo-3.1-fast-generate\": { \"audio\": 0.15, \"no_audio\": 0.10 },\n \"veo-3.1-generate\": { \"audio\": 0.40, \"no_audio\": 0.20 }\n };\n $m := widgets.model;\n $ga := (widgets.generate_audio = \"true\");\n $seconds := widgets.duration;\n $modelKey :=\n $contains($m, \"veo-3.1-fast-generate\") ? \"veo-3.1-fast-generate\" :\n $contains($m, \"veo-3.1-generate\") ? \"veo-3.1-generate\" :\n \"\";\n $audioKey := $ga ? \"audio\" : \"no_audio\";\n $modelPrices := $lookup($prices, $modelKey);\n $pps := $lookup($modelPrices, $audioKey);\n ($pps != null)\n ? 
{\"type\":\"usd\",\"usd\": $pps * $seconds}\n : {\"type\":\"range_usd\",\"min_usd\": 0.4, \"max_usd\": 3.2}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduTextToVideoNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model name", "multiselect": false, "options": ["viduq1"]}], "prompt": ["STRING", {"tooltip": "A textual description for video generation", "multiline": true}]}, "optional": {"duration": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 5, "min": 5, "max": 5, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "resolution": ["COMBO", {"tooltip": "Supported values may vary by model & duration", "advanced": true, "multiselect": false, "options": ["1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt"], "optional": ["duration", "seed", "aspect_ratio", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduTextToVideoNode", "display_name": "Vidu Text To Video Generation", "description": "Generate video from a text prompt", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": 
false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduImageToVideoNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model name", "multiselect": false, "options": ["viduq1"]}], "image": ["IMAGE", {"tooltip": "An image to be used as the start frame of the generated video"}]}, "optional": {"prompt": ["STRING", {"tooltip": "A textual description for video generation", "default": "", "multiline": true}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 5, "min": 5, "max": 5, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "resolution": ["COMBO", {"tooltip": "Supported values may vary by model & duration", "advanced": true, "multiselect": false, "options": ["1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image"], "optional": ["prompt", "duration", "seed", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduImageToVideoNode", "display_name": "Vidu Image To Video Generation", "description": "Generate video from image and optional prompt", "python_module": 
"comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduReferenceVideoNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model name", "multiselect": false, "options": ["viduq1"]}], "images": ["IMAGE", {"tooltip": "Images to use as references to generate a video with consistent subjects (max 7 images)."}], "prompt": ["STRING", {"tooltip": "A textual description for video generation", "multiline": true}]}, "optional": {"duration": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 5, "min": 5, "max": 5, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video", "multiselect": false, "options": ["16:9", "9:16", "1:1"]}], "resolution": ["COMBO", {"tooltip": "Supported values may vary by model & duration", "advanced": true, "multiselect": false, "options": ["1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "images", "prompt"], "optional": ["duration", "seed", "aspect_ratio", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": 
[false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduReferenceVideoNode", "display_name": "Vidu Reference To Video Generation", "description": "Generate video from multiple images and a prompt", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduStartEndToVideoNode": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model name", "multiselect": false, "options": ["viduq1"]}], "first_frame": ["IMAGE", {"tooltip": "Start frame"}], "end_frame": ["IMAGE", {"tooltip": "End frame"}]}, "optional": {"prompt": ["STRING", {"tooltip": "A textual description for video generation", "multiline": true}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds", "default": 5, "min": 5, "max": 5, "step": 1, "display": "number"}], "seed": ["INT", {"tooltip": "Seed for video generation (0 for random)", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "resolution": ["COMBO", {"tooltip": "Supported values may vary by model & duration", "advanced": true, "multiselect": false, "options": ["1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "first_frame", "end_frame"], "optional": ["prompt", "duration", "seed", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", 
"api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduStartEndToVideoNode", "display_name": "Vidu Start End To Video Generation", "description": "Generate a video from start and end frames and a prompt", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.4}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu2TextToVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["viduq2"]}], "prompt": ["STRING", {"tooltip": "A textual description for video generation, with a maximum length of 2000 characters.", "multiline": true}], "duration": ["INT", {"default": 5, "min": 1, "max": 10, "step": 1, "display": "slider"}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "3:4", "4:3", "1:1"]}], "resolution": ["COMBO", {"advanced": true, "multiselect": false, "options": ["720p", "1080p"]}], "background_music": ["BOOLEAN", {"tooltip": "Whether to add background music to the generated video.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "duration", "seed", "aspect_ratio", "resolution", "background_music"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": 
["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu2TextToVideoNode", "display_name": "Vidu2 Text-to-Video Generation", "description": "Generate video from a text prompt", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $is1080 := widgets.resolution = \"1080p\";\n $base := $is1080 ? 0.1 : 0.075;\n $perSec := $is1080 ? 0.05 : 0.025;\n {\"type\":\"usd\",\"usd\": $base + $perSec * (widgets.duration - 1)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu2ImageToVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]}], "image": ["IMAGE", {"tooltip": "An image to be used as the start frame of the generated video."}], "prompt": ["STRING", {"tooltip": "An optional text prompt for video generation (max 2000 characters).", "default": "", "multiline": true}], "duration": ["INT", {"default": 5, "min": 1, "max": 10, "step": 1, "display": "slider"}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "resolution": ["COMBO", {"advanced": true, "multiselect": false, "options": ["720p", "1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame.", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt", "duration", "seed", "resolution", 
"movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu2ImageToVideoNode", "display_name": "Vidu2 Image-to-Video Generation", "description": "Generate a video from an image and an optional prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $d := widgets.duration;\n $is1080 := widgets.resolution = \"1080p\";\n $contains($m, \"pro-fast\")\n ? (\n $base := $is1080 ? 0.08 : 0.04;\n $perSec := $is1080 ? 0.02 : 0.01;\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n : $contains($m, \"pro\")\n ? (\n $base := $is1080 ? 0.275 : 0.075;\n $perSec := $is1080 ? 0.075 : 0.05;\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n : $contains($m, \"turbo\")\n ? (\n $is1080\n ? {\"type\":\"usd\",\"usd\": 0.175 + 0.05 * ($d - 1)}\n : (\n $d <= 1 ? {\"type\":\"usd\",\"usd\": 0.04}\n : $d <= 2 ? {\"type\":\"usd\",\"usd\": 0.05}\n : {\"type\":\"usd\",\"usd\": 0.05 + 0.05 * ($d - 2)}\n )\n )\n : {\"type\":\"usd\",\"usd\": 0.04}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu2ReferenceVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["viduq2"]}], "subjects": ["COMFY_AUTOGROW_V3", {"tooltip": "For each subject, provide up to 3 reference images (7 images total across all subjects). 
Reference them in prompts via @subject{subject_id}.", "template": {"input": {"required": {"reference_images": ["IMAGE", {}]}}, "names": ["subject1", "subject2", "subject3", "subject4", "subject5", "subject6", "subject7"], "min": 1}}], "prompt": ["STRING", {"tooltip": "A textual description for video generation (max 2000 characters).", "multiline": true}], "audio": ["BOOLEAN", {"tooltip": "When enabled, the video will contain generated speech and background music based on the prompt.", "advanced": true, "default": false}], "duration": ["INT", {"default": 5, "min": 1, "max": 10, "step": 1, "display": "slider"}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "aspect_ratio": ["COMBO", {"multiselect": false, "options": ["16:9", "9:16", "4:3", "3:4", "1:1"]}], "resolution": ["COMBO", {"advanced": true, "multiselect": false, "options": ["720p", "1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame.", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "subjects", "prompt", "audio", "duration", "seed", "aspect_ratio", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu2ReferenceVideoNode", "display_name": "Vidu2 Reference-to-Video Generation", "description": "Generate a video from multiple reference images and a prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, 
"api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "audio", "type": "BOOLEAN"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $is1080 := widgets.resolution = \"1080p\";\n $base := $is1080 ? 0.375 : 0.125;\n $perSec := $is1080 ? 0.05 : 0.025;\n $audioCost := widgets.audio = true ? 0.075 : 0;\n {\"type\":\"usd\",\"usd\": $base + $perSec * (widgets.duration - 1) + $audioCost}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu2StartEndToVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["viduq2-pro-fast", "viduq2-pro", "viduq2-turbo"]}], "first_frame": ["IMAGE", {}], "end_frame": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt description (max 2000 characters).", "multiline": true}], "duration": ["INT", {"default": 5, "min": 2, "max": 8, "step": 1, "display": "slider"}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "resolution": ["COMBO", {"advanced": true, "multiselect": false, "options": ["720p", "1080p"]}], "movement_amplitude": ["COMBO", {"tooltip": "The movement amplitude of objects in the frame.", "advanced": true, "multiselect": false, "options": ["auto", "small", "medium", "large"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "first_frame", "end_frame", "prompt", "duration", "seed", "resolution", "movement_amplitude"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu2StartEndToVideoNode", "display_name": "Vidu2 Start/End Frame-to-Video Generation", 
"description": "Generate a video from a start frame, an end frame, and a prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $d := widgets.duration;\n $is1080 := widgets.resolution = \"1080p\";\n $contains($m, \"pro-fast\")\n ? (\n $base := $is1080 ? 0.08 : 0.04;\n $perSec := $is1080 ? 0.02 : 0.01;\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n : $contains($m, \"pro\")\n ? (\n $base := $is1080 ? 0.275 : 0.075;\n $perSec := $is1080 ? 0.075 : 0.05;\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n : $contains($m, \"turbo\")\n ? (\n $is1080\n ? {\"type\":\"usd\",\"usd\": 0.175 + 0.05 * ($d - 1)}\n : (\n $d <= 2 ? 
{\"type\":\"usd\",\"usd\": 0.05}\n : {\"type\":\"usd\",\"usd\": 0.05 + 0.05 * ($d - 2)}\n )\n )\n : {\"type\":\"usd\",\"usd\": 0.04}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduExtendVideoNode": {"input": {"required": {"model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for video extension.", "options": [{"key": "viduq2-pro", "inputs": {"required": {"duration": ["INT", {"tooltip": "Duration of the extended video in seconds.", "default": 4, "min": 1, "max": 7, "step": 1, "display": "slider"}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}]}}}, {"key": "viduq2-turbo", "inputs": {"required": {"duration": ["INT", {"tooltip": "Duration of the extended video in seconds.", "default": 4, "min": 1, "max": 7, "step": 1, "display": "slider"}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}]}}}]}], "video": ["VIDEO", {"tooltip": "The source video to extend."}], "prompt": ["STRING", {"tooltip": "An optional text prompt for the extended video (max 2000 characters).", "default": "", "multiline": true}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "optional": {"end_frame": ["IMAGE", {}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "video", "prompt", "seed"], "optional": ["end_frame"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduExtendVideoNode", "display_name": "Vidu Video Extension", "description": "Extend an existing video by generating additional frames.", 
"python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "model.duration", "type": "INT"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $d := $lookup(widgets, \"model.duration\");\n $res := $lookup(widgets, \"model.resolution\");\n $contains($m, \"pro\")\n ? (\n $base := $lookup({\"720p\": 0.15, \"1080p\": 0.3}, $res);\n $perSec := $lookup({\"720p\": 0.05, \"1080p\": 0.075}, $res);\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n : (\n $base := $lookup({\"720p\": 0.075, \"1080p\": 0.2}, $res);\n $perSec := $lookup({\"720p\": 0.025, \"1080p\": 0.05}, $res);\n {\"type\":\"usd\",\"usd\": $base + $perSec * ($d - 1)}\n )\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "ViduMultiFrameVideoNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["viduq2-pro", "viduq2-turbo"]}], "start_image": ["IMAGE", {"tooltip": "The starting frame image. Aspect ratio must be between 1:4 and 4:1."}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "resolution": ["COMBO", {"multiselect": false, "options": ["720p", "1080p"]}], "frames": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Number of keyframe transitions (2-9).", "options": [{"key": "2", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. 
Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "3", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "4", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. 
Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "5", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. 
Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt5": ["STRING", {"tooltip": "Text prompt for frame 5 transition.", "default": "", "multiline": true}], "end_image5": ["IMAGE", {"tooltip": "End frame image for segment 5. Aspect ratio must be between 1:4 and 4:1."}], "duration5": ["INT", {"tooltip": "Duration for segment 5 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "6", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. 
Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt5": ["STRING", {"tooltip": "Text prompt for frame 5 transition.", "default": "", "multiline": true}], "end_image5": ["IMAGE", {"tooltip": "End frame image for segment 5. Aspect ratio must be between 1:4 and 4:1."}], "duration5": ["INT", {"tooltip": "Duration for segment 5 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt6": ["STRING", {"tooltip": "Text prompt for frame 6 transition.", "default": "", "multiline": true}], "end_image6": ["IMAGE", {"tooltip": "End frame image for segment 6. Aspect ratio must be between 1:4 and 4:1."}], "duration6": ["INT", {"tooltip": "Duration for segment 6 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "7", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. 
Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt5": ["STRING", {"tooltip": "Text prompt for frame 5 transition.", "default": "", "multiline": true}], "end_image5": ["IMAGE", {"tooltip": "End frame image for segment 5. Aspect ratio must be between 1:4 and 4:1."}], "duration5": ["INT", {"tooltip": "Duration for segment 5 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt6": ["STRING", {"tooltip": "Text prompt for frame 6 transition.", "default": "", "multiline": true}], "end_image6": ["IMAGE", {"tooltip": "End frame image for segment 6. 
Aspect ratio must be between 1:4 and 4:1."}], "duration6": ["INT", {"tooltip": "Duration for segment 6 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt7": ["STRING", {"tooltip": "Text prompt for frame 7 transition.", "default": "", "multiline": true}], "end_image7": ["IMAGE", {"tooltip": "End frame image for segment 7. Aspect ratio must be between 1:4 and 4:1."}], "duration7": ["INT", {"tooltip": "Duration for segment 7 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "8", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. 
Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt5": ["STRING", {"tooltip": "Text prompt for frame 5 transition.", "default": "", "multiline": true}], "end_image5": ["IMAGE", {"tooltip": "End frame image for segment 5. Aspect ratio must be between 1:4 and 4:1."}], "duration5": ["INT", {"tooltip": "Duration for segment 5 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt6": ["STRING", {"tooltip": "Text prompt for frame 6 transition.", "default": "", "multiline": true}], "end_image6": ["IMAGE", {"tooltip": "End frame image for segment 6. Aspect ratio must be between 1:4 and 4:1."}], "duration6": ["INT", {"tooltip": "Duration for segment 6 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt7": ["STRING", {"tooltip": "Text prompt for frame 7 transition.", "default": "", "multiline": true}], "end_image7": ["IMAGE", {"tooltip": "End frame image for segment 7. Aspect ratio must be between 1:4 and 4:1."}], "duration7": ["INT", {"tooltip": "Duration for segment 7 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt8": ["STRING", {"tooltip": "Text prompt for frame 8 transition.", "default": "", "multiline": true}], "end_image8": ["IMAGE", {"tooltip": "End frame image for segment 8. Aspect ratio must be between 1:4 and 4:1."}], "duration8": ["INT", {"tooltip": "Duration for segment 8 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}, {"key": "9", "inputs": {"required": {"prompt1": ["STRING", {"tooltip": "Text prompt for frame 1 transition.", "default": "", "multiline": true}], "end_image1": ["IMAGE", {"tooltip": "End frame image for segment 1. 
Aspect ratio must be between 1:4 and 4:1."}], "duration1": ["INT", {"tooltip": "Duration for segment 1 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt2": ["STRING", {"tooltip": "Text prompt for frame 2 transition.", "default": "", "multiline": true}], "end_image2": ["IMAGE", {"tooltip": "End frame image for segment 2. Aspect ratio must be between 1:4 and 4:1."}], "duration2": ["INT", {"tooltip": "Duration for segment 2 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt3": ["STRING", {"tooltip": "Text prompt for frame 3 transition.", "default": "", "multiline": true}], "end_image3": ["IMAGE", {"tooltip": "End frame image for segment 3. Aspect ratio must be between 1:4 and 4:1."}], "duration3": ["INT", {"tooltip": "Duration for segment 3 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt4": ["STRING", {"tooltip": "Text prompt for frame 4 transition.", "default": "", "multiline": true}], "end_image4": ["IMAGE", {"tooltip": "End frame image for segment 4. Aspect ratio must be between 1:4 and 4:1."}], "duration4": ["INT", {"tooltip": "Duration for segment 4 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt5": ["STRING", {"tooltip": "Text prompt for frame 5 transition.", "default": "", "multiline": true}], "end_image5": ["IMAGE", {"tooltip": "End frame image for segment 5. Aspect ratio must be between 1:4 and 4:1."}], "duration5": ["INT", {"tooltip": "Duration for segment 5 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt6": ["STRING", {"tooltip": "Text prompt for frame 6 transition.", "default": "", "multiline": true}], "end_image6": ["IMAGE", {"tooltip": "End frame image for segment 6. 
Aspect ratio must be between 1:4 and 4:1."}], "duration6": ["INT", {"tooltip": "Duration for segment 6 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt7": ["STRING", {"tooltip": "Text prompt for frame 7 transition.", "default": "", "multiline": true}], "end_image7": ["IMAGE", {"tooltip": "End frame image for segment 7. Aspect ratio must be between 1:4 and 4:1."}], "duration7": ["INT", {"tooltip": "Duration for segment 7 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt8": ["STRING", {"tooltip": "Text prompt for frame 8 transition.", "default": "", "multiline": true}], "end_image8": ["IMAGE", {"tooltip": "End frame image for segment 8. Aspect ratio must be between 1:4 and 4:1."}], "duration8": ["INT", {"tooltip": "Duration for segment 8 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}], "prompt9": ["STRING", {"tooltip": "Text prompt for frame 9 transition.", "default": "", "multiline": true}], "end_image9": ["IMAGE", {"tooltip": "End frame image for segment 9. 
Aspect ratio must be between 1:4 and 4:1."}], "duration9": ["INT", {"tooltip": "Duration for segment 9 in seconds.", "default": 4, "min": 2, "max": 7, "step": 1, "display": "slider"}]}}}]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "start_image", "seed", "resolution", "frames"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "ViduMultiFrameVideoNode", "display_name": "Vidu Multi-Frame Video Generation", "description": "Generate a video with multiple keyframe transitions.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}, {"name": "resolution", "type": "COMBO"}, {"name": "frames", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "frames.duration1", "type": "INT"}, {"name": "frames.duration2", "type": "INT"}, {"name": "frames.duration3", "type": "INT"}, {"name": "frames.duration4", "type": "INT"}, {"name": "frames.duration5", "type": "INT"}, {"name": "frames.duration6", "type": "INT"}, {"name": "frames.duration7", "type": "INT"}, {"name": "frames.duration8", "type": "INT"}, {"name": "frames.duration9", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $m := widgets.model;\n $n := $number(widgets.frames);\n $is1080 := widgets.resolution = \"1080p\";\n $d1 := $lookup(widgets, \"frames.duration1\");\n $d2 := $lookup(widgets, \"frames.duration2\");\n $d3 := $n >= 3 ? $lookup(widgets, \"frames.duration3\") : 0;\n $d4 := $n >= 4 ? $lookup(widgets, \"frames.duration4\") : 0;\n $d5 := $n >= 5 ? 
$lookup(widgets, \"frames.duration5\") : 0;\n $d6 := $n >= 6 ? $lookup(widgets, \"frames.duration6\") : 0;\n $d7 := $n >= 7 ? $lookup(widgets, \"frames.duration7\") : 0;\n $d8 := $n >= 8 ? $lookup(widgets, \"frames.duration8\") : 0;\n $d9 := $n >= 9 ? $lookup(widgets, \"frames.duration9\") : 0;\n $totalDuration := $d1 + $d2 + $d3 + $d4 + $d5 + $d6 + $d7 + $d8 + $d9;\n $contains($m, \"pro\")\n ? (\n $base := $is1080 ? 0.3 : 0.15;\n $perSec := $is1080 ? 0.075 : 0.05;\n {\"type\":\"usd\",\"usd\": $n * $base + $perSec * $totalDuration}\n )\n : (\n $base := $is1080 ? 0.2 : 0.075;\n $perSec := $is1080 ? 0.05 : 0.025;\n {\"type\":\"usd\",\"usd\": $n * $base + $perSec * $totalDuration}\n )\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu3TextToVideoNode": {"input": {"required": {"model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for video generation.", "options": [{"key": "viduq3-pro", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["16:9", "9:16", "3:4", "4:3", "1:1"]}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": "slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}, {"key": "viduq3-turbo", "inputs": {"required": {"aspect_ratio": ["COMBO", {"tooltip": "The aspect ratio of the output video.", "multiselect": false, "options": ["16:9", "9:16", "3:4", "4:3", "1:1"]}], "resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": 
"slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}]}], "prompt": ["STRING", {"tooltip": "A textual description for video generation, with a maximum length of 2000 characters.", "multiline": true}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu3TextToVideoNode", "display_name": "Vidu Q3 Text-to-Video Generation", "description": "Generate video from a text prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "model.duration", "type": "INT"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $res := $lookup(widgets, \"model.resolution\");\n $d := $lookup(widgets, \"model.duration\");\n $contains(widgets.model, \"turbo\")\n ? 
(\n $rate := $lookup({\"720p\": 0.06, \"1080p\": 0.08}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n : (\n $rate := $lookup({\"720p\": 0.15, \"1080p\": 0.16}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu3ImageToVideoNode": {"input": {"required": {"model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for video generation.", "options": [{"key": "viduq3-pro", "inputs": {"required": {"resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p", "2K"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": "slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}, {"key": "viduq3-turbo", "inputs": {"required": {"resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": "slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}]}], "image": ["IMAGE", {"tooltip": "An image to be used as the start frame of the generated video."}], "prompt": ["STRING", {"tooltip": "An optional text prompt for video generation (max 2000 characters).", "default": "", "multiline": true}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt", "seed"], "hidden": ["auth_token_comfy_org", 
"api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu3ImageToVideoNode", "display_name": "Vidu Q3 Image-to-Video Generation", "description": "Generate a video from an image and an optional prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "model.duration", "type": "INT"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $res := $lookup(widgets, \"model.resolution\");\n $d := $lookup(widgets, \"model.duration\");\n $contains(widgets.model, \"turbo\")\n ? (\n $rate := $lookup({\"720p\": 0.06, \"1080p\": 0.08}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n : (\n $rate := $lookup({\"720p\": 0.15, \"1080p\": 0.16, \"2k\": 0.2}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "Vidu3StartEndToVideoNode": {"input": {"required": {"model": ["COMFY_DYNAMICCOMBO_V3", {"tooltip": "Model to use for video generation.", "options": [{"key": "viduq3-pro", "inputs": {"required": {"resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": false, "options": ["720p", "1080p"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": "slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}, {"key": "viduq3-turbo", "inputs": {"required": {"resolution": ["COMBO", {"tooltip": "Resolution of the output video.", "multiselect": 
false, "options": ["720p", "1080p"]}], "duration": ["INT", {"tooltip": "Duration of the output video in seconds.", "default": 5, "min": 1, "max": 16, "step": 1, "display": "slider"}], "audio": ["BOOLEAN", {"tooltip": "When enabled, outputs video with sound (including dialogue and sound effects).", "default": false}]}}}]}], "first_frame": ["IMAGE", {}], "end_frame": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt description (max 2000 characters).", "multiline": true}], "seed": ["INT", {"default": 1, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "first_frame", "end_frame", "prompt", "seed"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "Vidu3StartEndToVideoNode", "display_name": "Vidu Q3 Start/End Frame-to-Video Generation", "description": "Generate a video from a start frame, an end frame, and a prompt.", "python_module": "comfy_api_nodes.nodes_vidu", "category": "api node/video/Vidu", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMFY_DYNAMICCOMBO_V3"}, {"name": "model.duration", "type": "INT"}, {"name": "model.resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $res := $lookup(widgets, \"model.resolution\");\n $d := $lookup(widgets, \"model.duration\");\n $contains(widgets.model, \"turbo\")\n ? 
(\n $rate := $lookup({\"720p\": 0.06, \"1080p\": 0.08}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n : (\n $rate := $lookup({\"720p\": 0.15, \"1080p\": 0.16}, $res);\n {\"type\":\"usd\",\"usd\": $rate * $d}\n )\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanTextToImageApi": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model to use.", "default": "wan2.5-t2i-preview", "multiselect": false, "options": ["wan2.5-t2i-preview"]}], "prompt": ["STRING", {"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese.", "default": "", "multiline": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative prompt describing what to avoid.", "default": "", "multiline": true}], "width": ["INT", {"default": 1024, "min": 768, "max": 1440, "step": 32}], "height": ["INT", {"default": 1024, "min": 768, "max": 1440, "step": 32}], "seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "prompt_extend": ["BOOLEAN", {"tooltip": "Whether to enhance the prompt with AI assistance.", "advanced": true, "default": true}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an AI-generated watermark to the result.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt"], "optional": ["negative_prompt", "width", "height", "seed", "prompt_extend", "watermark"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanTextToImageApi", "display_name": "Wan Text to Image", "description": "Generates an image based on a text 
prompt.", "python_module": "comfy_api_nodes.nodes_wan", "category": "api node/image/Wan", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.03}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanImageToImageApi": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model to use.", "default": "wan2.5-i2i-preview", "multiselect": false, "options": ["wan2.5-i2i-preview"]}], "image": ["IMAGE", {"tooltip": "Single-image editing or multi-image fusion. Maximum 2 images."}], "prompt": ["STRING", {"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese.", "default": "", "multiline": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative prompt describing what to avoid.", "default": "", "multiline": true}], "seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an AI-generated watermark to the result.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt"], "optional": ["negative_prompt", "seed", "watermark"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanImageToImageApi", "display_name": "Wan Image to Image", "description": "Generates an image from one or two input images and a text prompt. 
The output image is currently fixed at 1.6 MP, and its aspect ratio matches the input image(s).", "python_module": "comfy_api_nodes.nodes_wan", "category": "api node/image/Wan", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [], "inputs": [], "input_groups": []}, "expr": "{\"type\":\"usd\",\"usd\":0.03}"}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanTextToVideoApi": {"input": {"required": {"model": ["COMBO", {"tooltip": "Model to use.", "default": "wan2.6-t2v", "multiselect": false, "options": ["wan2.5-t2v-preview", "wan2.6-t2v"]}], "prompt": ["STRING", {"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese.", "default": "", "multiline": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative prompt describing what to avoid.", "default": "", "multiline": true}], "size": ["COMBO", {"default": "720p: 1:1 (960x960)", "multiselect": false, "options": ["480p: 1:1 (624x624)", "480p: 16:9 (832x480)", "480p: 9:16 (480x832)", "720p: 1:1 (960x960)", "720p: 16:9 (1280x720)", "720p: 9:16 (720x1280)", "720p: 4:3 (1088x832)", "720p: 3:4 (832x1088)", "1080p: 1:1 (1440x1440)", "1080p: 16:9 (1920x1080)", "1080p: 9:16 (1080x1920)", "1080p: 4:3 (1632x1248)", "1080p: 3:4 (1248x1632)"]}], "duration": ["INT", {"tooltip": "A 15-second duration is available only for the Wan 2.6 model.", "default": 5, "min": 5, "max": 15, "step": 5, "display": "number"}], "audio": ["AUDIO", {"tooltip": "Audio must contain a clear, loud voice, without extraneous noise or background music."}], "seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "generate_audio": ["BOOLEAN", {"tooltip": "If no audio input is provided, generate audio automatically.", "advanced": true, 
"default": false}], "prompt_extend": ["BOOLEAN", {"tooltip": "Whether to enhance the prompt with AI assistance.", "advanced": true, "default": true}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an AI-generated watermark to the result.", "advanced": true, "default": false}], "shot_type": ["COMBO", {"tooltip": "Specifies the shot type for the generated video, that is, whether the video is a single continuous shot or multiple shots with cuts. This parameter takes effect only when prompt_extend is True.", "advanced": true, "multiselect": false, "options": ["single", "multi"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt"], "optional": ["negative_prompt", "size", "duration", "audio", "seed", "generate_audio", "prompt_extend", "watermark", "shot_type"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanTextToVideoApi", "display_name": "Wan Text to Video", "description": "Generates a video based on a text prompt.", "python_module": "comfy_api_nodes.nodes_wan", "category": "api node/video/Wan", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "size", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $ppsTable := { \"480p\": 0.05, \"720p\": 0.1, \"1080p\": 0.15 };\n $resKey := $substringBefore(widgets.size, \":\");\n $pps := $lookup($ppsTable, $resKey);\n { \"type\": \"usd\", \"usd\": $round($pps * widgets.duration, 2) }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanImageToVideoApi": {"input": {"required": 
{"model": ["COMBO", {"tooltip": "Model to use.", "default": "wan2.6-i2v", "multiselect": false, "options": ["wan2.5-i2v-preview", "wan2.6-i2v"]}], "image": ["IMAGE", {}], "prompt": ["STRING", {"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese.", "default": "", "multiline": true}]}, "optional": {"negative_prompt": ["STRING", {"tooltip": "Negative prompt describing what to avoid.", "default": "", "multiline": true}], "resolution": ["COMBO", {"default": "720P", "multiselect": false, "options": ["480P", "720P", "1080P"]}], "duration": ["INT", {"tooltip": "A 15-second duration is available only for the Wan 2.6 model.", "default": 5, "min": 5, "max": 15, "step": 5, "display": "number"}], "audio": ["AUDIO", {"tooltip": "Audio must contain a clear, loud voice, without extraneous noise or background music."}], "seed": ["INT", {"tooltip": "Seed to use for generation.", "default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "generate_audio": ["BOOLEAN", {"tooltip": "If no audio input is provided, generate audio automatically.", "advanced": true, "default": false}], "prompt_extend": ["BOOLEAN", {"tooltip": "Whether to enhance the prompt with AI assistance.", "advanced": true, "default": true}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an AI-generated watermark to the result.", "advanced": true, "default": false}], "shot_type": ["COMBO", {"tooltip": "Specifies the shot type for the generated video, that is, whether the video is a single continuous shot or multiple shots with cuts. 
This parameter takes effect only when prompt_extend is True.", "advanced": true, "multiselect": false, "options": ["single", "multi"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "prompt"], "optional": ["negative_prompt", "resolution", "duration", "audio", "seed", "generate_audio", "prompt_extend", "watermark", "shot_type"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanImageToVideoApi", "display_name": "Wan Image to Video", "description": "Generates a video from the first frame and a text prompt.", "python_module": "comfy_api_nodes.nodes_wan", "category": "api node/video/Wan", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "duration", "type": "INT"}, {"name": "resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $ppsTable := { \"480p\": 0.05, \"720p\": 0.1, \"1080p\": 0.15 };\n $pps := $lookup($ppsTable, widgets.resolution);\n { \"type\": \"usd\", \"usd\": $round($pps * widgets.duration, 2) }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WanReferenceVideoApi": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["wan2.6-r2v"]}], "prompt": ["STRING", {"tooltip": "Prompt describing the elements and visual features. Supports English and Chinese. 
Use identifiers such as `character1` and `character2` to refer to the reference characters.", "default": "", "multiline": true}], "negative_prompt": ["STRING", {"tooltip": "Negative prompt describing what to avoid.", "default": "", "multiline": true}], "reference_videos": ["COMFY_AUTOGROW_V3", {"template": {"input": {"required": {"reference_video": ["VIDEO", {}]}}, "names": ["character1", "character2", "character3"], "min": 1}}], "size": ["COMBO", {"multiselect": false, "options": ["720p: 1:1 (960x960)", "720p: 16:9 (1280x720)", "720p: 9:16 (720x1280)", "720p: 4:3 (1088x832)", "720p: 3:4 (832x1088)", "1080p: 1:1 (1440x1440)", "1080p: 16:9 (1920x1080)", "1080p: 9:16 (1080x1920)", "1080p: 4:3 (1632x1248)", "1080p: 3:4 (1248x1632)"]}], "duration": ["INT", {"default": 5, "min": 5, "max": 10, "step": 5, "display": "slider"}], "seed": ["INT", {"default": 0, "min": 0, "max": 2147483647, "step": 1, "control_after_generate": true, "display": "number"}], "shot_type": ["COMBO", {"tooltip": "Specifies the shot type for the generated video, that is, whether the video is a single continuous shot or multiple shots with cuts.", "advanced": true, "multiselect": false, "options": ["single", "multi"]}], "watermark": ["BOOLEAN", {"tooltip": "Whether to add an AI-generated watermark to the result.", "advanced": true, "default": false}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "prompt", "negative_prompt", "reference_videos", "size", "duration", "seed", "shot_type", "watermark"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "WanReferenceVideoApi", "display_name": "Wan Reference to Video", "description": "Use the character and voice from input videos, combined with a prompt, 
to generate a new video that maintains character consistency.", "python_module": "comfy_api_nodes.nodes_wan", "category": "api node/video/Wan", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "size", "type": "COMBO"}, {"name": "duration", "type": "INT"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $rate := $contains(widgets.size, \"1080p\") ? 0.15 : 0.10;\n $inputMin := 2 * $rate;\n $inputMax := 5 * $rate;\n $outputPrice := widgets.duration * $rate;\n {\n \"type\": \"range_usd\",\n \"min_usd\": $inputMin + $outputPrice,\n \"max_usd\": $inputMax + $outputPrice\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WavespeedFlashVSRNode": {"input": {"required": {"video": ["VIDEO", {}], "target_resolution": ["COMBO", {"multiselect": false, "options": ["720p", "1080p", "2K", "4K"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["video", "target_resolution"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["VIDEO"], "output_is_list": [false], "output_name": ["VIDEO"], "output_tooltips": [null], "output_matchtypes": null, "name": "WavespeedFlashVSRNode", "display_name": "FlashVSR Video Upscale", "description": "Fast, high-quality video upscaler that boosts resolution and restores clarity for low-resolution or blurry footage.", "python_module": "comfy_api_nodes.nodes_wavespeed", "category": "api node/video/WaveSpeed", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "target_resolution", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $price_for_1sec := {\"720p\": 0.012, 
\"1080p\": 0.018, \"2k\": 0.024, \"4k\": 0.032};\n {\n \"type\":\"usd\",\n \"usd\": $lookup($price_for_1sec, widgets.target_resolution),\n \"format\":{\"suffix\": \"/second\", \"approximate\": true}\n }\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "WavespeedImageUpscaleNode": {"input": {"required": {"model": ["COMBO", {"multiselect": false, "options": ["SeedVR2", "Ultimate"]}], "image": ["IMAGE", {}], "target_resolution": ["COMBO", {"multiselect": false, "options": ["2K", "4K", "8K"]}]}, "hidden": {"auth_token_comfy_org": ["AUTH_TOKEN_COMFY_ORG"], "api_key_comfy_org": ["API_KEY_COMFY_ORG"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["model", "image", "target_resolution"], "hidden": ["auth_token_comfy_org", "api_key_comfy_org", "unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "WavespeedImageUpscaleNode", "display_name": "WaveSpeed Image Upscale", "description": "Boost image resolution and quality, upscaling photos to 4K or 8K for sharp, detailed results.", "python_module": "comfy_api_nodes.nodes_wavespeed", "category": "api node/image/WaveSpeed", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": true, "price_badge": {"engine": "jsonata", "depends_on": {"widgets": [{"name": "model", "type": "COMBO"}], "inputs": [], "input_groups": []}, "expr": "\n (\n $prices := {\"seedvr2\": 0.01, \"ultimate\": 0.06};\n {\"type\":\"usd\", \"usd\": $lookup($prices, widgets.model)}\n )\n "}, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "AutoCropFaces": {"input": {"required": {"image": ["IMAGE"], "number_of_faces": ["INT", {"default": 5, "min": 1, "max": 100, "step": 1}], "scale_factor": ["FLOAT", {"default": 1.5, "min": 0.5, "max": 10, "step": 0.5, "display": "slider"}], "shift_factor": ["FLOAT", 
{"default": 0.45, "min": 0, "max": 1, "step": 0.01, "display": "slider"}], "start_index": ["INT", {"default": 0, "step": 1, "display": "number"}], "max_faces_per_image": ["INT", {"default": 50, "min": 1, "max": 1000, "step": 1}], "aspect_ratio": [["9:16", "2:3", "3:4", "4:5", "1:1", "5:4", "4:3", "3:2", "16:9"], {"default": "1:1"}]}}, "input_order": {"required": ["image", "number_of_faces", "scale_factor", "shift_factor", "start_index", "max_faces_per_image", "aspect_ratio"]}, "is_input_list": false, "output": ["IMAGE", "CROP_DATA"], "output_is_list": [false, false], "output_name": ["face"], "name": "AutoCropFaces", "display_name": "Auto Crop Faces", "description": "", "python_module": "custom_nodes.ComfyUI-AutoCropFaces", "category": "Faces", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Primitive boolean [Crystools]": {"input": {"required": {"boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["boolean"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "name": "Primitive boolean [Crystools]", "display_name": "\ud83e\ude9b Primitive boolean", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Primitive", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Primitive string [Crystools]": {"input": {"required": {"string": ["STRING", {"default": ""}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "name": "Primitive string [Crystools]", "display_name": "\ud83e\ude9b Primitive string", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Primitive", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Primitive string multiline [Crystools]": {"input": {"required": {"string": ["STRING", {"multiline": true, 
"default": ""}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "name": "Primitive string multiline [Crystools]", "display_name": "\ud83e\ude9b Primitive string multiline", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Primitive", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Primitive integer [Crystools]": {"input": {"required": {"int": ["INT", {"default": 1, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["int"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["int"], "name": "Primitive integer [Crystools]", "display_name": "\ud83e\ude9b Primitive integer", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Primitive", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Primitive float [Crystools]": {"input": {"required": {"float": ["FLOAT", {"default": 1, "min": -1.7976931348623157e+308, "max": 1.7976931348623157e+308, "step": 0.01}]}}, "input_order": {"required": ["float"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["float"], "name": "Primitive float [Crystools]", "display_name": "\ud83e\ude9b Primitive float", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Primitive", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Show any [Crystools]": {"input": {"required": {}, "optional": {"any_value": ["*"], "console": ["BOOLEAN", {"default": false}], "display": ["BOOLEAN", {"default": true}], "prefix": ["STRING", {"default": ""}]}, "hidden": {}}, "input_order": {"required": [], "optional": ["any_value", "console", "display", "prefix"], "hidden": []}, "is_input_list": true, 
"output": [], "output_is_list": [], "output_name": [], "name": "Show any [Crystools]", "display_name": "\ud83e\ude9b Show any value to console/display", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Debugger", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Show any to JSON [Crystools]": {"input": {"required": {}, "optional": {"any_value": ["*"]}}, "input_order": {"required": [], "optional": ["any_value"]}, "is_input_list": true, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "name": "Show any to JSON [Crystools]", "display_name": "\ud83e\ude9b Show any to JSON", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Debugger", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "List of any [Crystools]": {"input": {"required": {}, "optional": {"any_1": ["*"], "any_2": ["*"], "any_3": ["*"], "any_4": ["*"], "any_5": ["*"], "any_6": ["*"], "any_7": ["*"], "any_8": ["*"]}}, "input_order": {"required": [], "optional": ["any_1", "any_2", "any_3", "any_4", "any_5", "any_6", "any_7", "any_8"]}, "is_input_list": false, "output": [["*"]], "output_is_list": [true], "output_name": ["any_list"], "name": "List of any [Crystools]", "display_name": "\ud83e\ude9b List of any", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/List", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "List of strings [Crystools]": {"input": {"required": {}, "optional": {"string_1": ["STRING", {"default": ""}], "string_2": ["STRING", {"default": ""}], "string_3": ["STRING", {"default": ""}], "string_4": ["STRING", {"default": ""}], "string_5": ["STRING", {"default": ""}], "string_6": ["STRING", {"default": ""}], "string_7": ["STRING", {"default": ""}], "string_8": ["STRING", {"default": ""}], "delimiter": ["STRING", 
{"default": " "}]}}, "input_order": {"required": [], "optional": ["string_1", "string_2", "string_3", "string_4", "string_5", "string_6", "string_7", "string_8", "delimiter"]}, "is_input_list": false, "output": ["STRING", "ListString"], "output_is_list": [false, true], "output_name": ["concatenated", "list_string"], "name": "List of strings [Crystools]", "display_name": "\ud83e\ude9b List of strings", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/List", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch from any [Crystools]": {"input": {"required": {"any": ["*"], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["any", "boolean"]}, "is_input_list": false, "output": ["*", "*"], "output_is_list": [false, false], "output_name": ["on_true", "on_false"], "name": "Switch from any [Crystools]", "display_name": "\ud83e\ude9b Switch from any", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch any [Crystools]": {"input": {"required": {"on_true": ["*", {"lazy": true}], "on_false": ["*", {"lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "Switch any [Crystools]", "display_name": "\ud83e\ude9b Switch any", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch latent [Crystools]": {"input": {"required": {"on_true": ["LATENT", {"lazy": true}], "on_false": ["LATENT", {"lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, 
"is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["latent"], "name": "Switch latent [Crystools]", "display_name": "\ud83e\ude9b Switch latent", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch conditioning [Crystools]": {"input": {"required": {"on_true": ["CONDITIONING", {"lazy": true}], "on_false": ["CONDITIONING", {"lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["conditioning"], "name": "Switch conditioning [Crystools]", "display_name": "\ud83e\ude9b Switch conditioning", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch image [Crystools]": {"input": {"required": {"on_true": ["IMAGE", {"lazy": true}], "on_false": ["IMAGE", {"lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "Switch image [Crystools]", "display_name": "\ud83e\ude9b Switch image", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch mask [Crystools]": {"input": {"required": {"on_true": ["MASK", {"lazy": true}], "on_false": ["MASK", {"lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["mask"], "name": "Switch 
mask [Crystools]", "display_name": "\ud83e\ude9b Switch mask", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Switch string [Crystools]": {"input": {"required": {"on_true": ["STRING", {"default": "", "lazy": true}], "on_false": ["STRING", {"default": "", "lazy": true}], "boolean": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["on_true", "on_false", "boolean"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "name": "Switch string [Crystools]", "display_name": "\ud83e\ude9b Switch string", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Switch", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Pipe to/edit any [Crystools]": {"input": {"required": {}, "optional": {"CPipeAny": ["CPipeAny"], "any_1": ["*"], "any_2": ["*"], "any_3": ["*"], "any_4": ["*"], "any_5": ["*"], "any_6": ["*"]}}, "input_order": {"required": [], "optional": ["CPipeAny", "any_1", "any_2", "any_3", "any_4", "any_5", "any_6"]}, "is_input_list": false, "output": ["CPipeAny"], "output_is_list": [false], "output_name": ["CPipeAny"], "name": "Pipe to/edit any [Crystools]", "display_name": "\ud83e\ude9b Pipe to/edit any", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Pipe from any [Crystools]": {"input": {"required": {"CPipeAny": ["CPipeAny"]}, "optional": {}}, "input_order": {"required": ["CPipeAny"], "optional": []}, "is_input_list": false, "output": ["CPipeAny", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["CPipeAny", "any_1", "any_2", "any_3", "any_4", "any_5", "any_6"], "name": "Pipe 
from any [Crystools]", "display_name": "\ud83e\ude9b Pipe from any", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Load image with metadata [Crystools]": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "JSON", "METADATA_RAW"], "output_is_list": [false, false, false, false], "output_name": ["image", "mask", "prompt", "Metadata RAW"], "name": "Load image with metadata [Crystools]", "display_name": "\ud83e\ude9b Load image with metadata", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Get resolution [Crystools]": {"input": {"required": {"image": ["IMAGE"]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["width", "height"], "name": "Get resolution [Crystools]", "display_name": "\ud83e\ude9b Get resolution", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Preview from image [Crystools]": {"input": {"required": {}, "optional": {"image": ["IMAGE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": [], 
"optional": ["image"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["METADATA_RAW"], "output_is_list": [false], "output_name": ["Metadata RAW"], "name": "Preview from image [Crystools]", "display_name": "\ud83e\ude9b Preview from image", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "Preview from metadata [Crystools]": {"input": {"required": {}, "optional": {"metadata_raw": ["METADATA_RAW", {"forceInput": true}]}}, "input_order": {"required": [], "optional": ["metadata_raw"]}, "is_input_list": false, "output": ["METADATA_RAW"], "output_is_list": [false], "output_name": ["Metadata RAW"], "name": "Preview from metadata [Crystools]", "display_name": "\ud83e\ude9b Preview from metadata", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "Save image with extra metadata [Crystools]": {"input": {"required": {"image": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "with_workflow": ["BOOLEAN", {"default": true}]}, "optional": {"metadata_extra": ["STRING", {"multiline": true, "default": "{\n \"Title\": \"Image generated by Crystian\",\n \"Description\": \"More info: https:\\/\\/www.instagram.com\\/crystian.ia\",\n \"Author\": \"crystian.ia\",\n \"Software\": \"ComfyUI\",\n \"Category\": \"StableDiffusion\",\n \"Rating\": 5,\n \"UserComment\": \"\",\n \"Keywords\": [\n \"\"\n ],\n \"Copyrights\": 
\"\"\n}"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image", "filename_prefix", "with_workflow"], "optional": ["metadata_extra"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["METADATA_RAW"], "output_is_list": [false], "output_name": ["Metadata RAW"], "name": "Save image with extra metadata [Crystools]", "display_name": "\ud83e\ude9b Save image with extra metadata", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["save", "save image", "export image", "output image", "write image", "download"], "essentials_category": "Basics"}, "Metadata extractor [Crystools]": {"input": {"required": {"metadata_raw": ["METADATA_RAW", {"forceInput": true}]}, "optional": {}}, "input_order": {"required": ["metadata_raw"], "optional": []}, "is_input_list": false, "output": ["JSON", "JSON", "JSON", "JSON", "STRING", "STRING"], "output_is_list": [false, false, false, false, false, false], "output_name": ["prompt", "workflow", "file info", "raw to JSON", "raw to property", "raw to csv"], "name": "Metadata extractor [Crystools]", "display_name": "\ud83e\ude9b Metadata extractor", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Metadata", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Metadata comparator [Crystools]": {"input": {"required": {"metadata_raw_old": ["METADATA_RAW", {"forceInput": true}], "metadata_raw_new": ["METADATA_RAW", {"forceInput": true}], "what": [["Prompt", "Workflow", "Fileinfo"]]}, "optional": {}}, "input_order": {"required": ["metadata_raw_old", "metadata_raw_new", "what"], "optional": []}, "is_input_list": false, "output": ["JSON"], "output_is_list": [false], "output_name": ["diff"], "name": 
"Metadata comparator [Crystools]", "display_name": "\ud83e\ude9b Metadata comparator", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Metadata", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "JSON comparator [Crystools]": {"input": {"required": {"json_old": ["JSON", {"forceInput": true}], "json_new": ["JSON", {"forceInput": true}]}, "optional": {}}, "input_order": {"required": ["json_old", "json_new"], "optional": []}, "is_input_list": false, "output": ["JSON"], "output_is_list": [false], "output_name": ["json_compared"], "name": "JSON comparator [Crystools]", "display_name": "\ud83e\ude9b JSON comparator", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Stats system [Crystools]": {"input": {"required": {"latent": ["LATENT"]}}, "input_order": {"required": ["latent"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["latent"], "name": "Stats system [Crystools]", "display_name": "\ud83e\ude9b Stats system (powered by WAS)", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Utils", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Read JSON file [Crystools]": {"input": {"required": {}, "optional": {"path_to_json": ["STRING", {"default": ""}]}}, "input_order": {"required": [], "optional": ["path_to_json"]}, "is_input_list": false, "output": ["JSON"], "output_is_list": [false], "output_name": ["json"], "name": "Read JSON file [Crystools]", "display_name": "\ud83e\ude9b Read JSON file (BETA)", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Utils", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "JSON extractor [Crystools]": 
{"input": {"required": {"json": ["JSON", {"forceInput": true}]}, "optional": {"key": ["STRING", {"default": ""}], "default": ["STRING", {"default": ""}]}}, "input_order": {"required": ["json"], "optional": ["key", "default"]}, "is_input_list": false, "output": ["*", "STRING", "INT", "FLOAT", "BOOLEAN"], "output_is_list": [false, false, false, false, false], "output_name": ["any", "string", "int", "float", "boolean"], "name": "JSON extractor [Crystools]", "display_name": "\ud83e\ude9b JSON extractor (BETA)", "description": "", "python_module": "custom_nodes.ComfyUI-Crystools", "category": "crystools \ud83e\ude9b/Utils", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoraLoader|pysssss": {"input": {"required": {"model": ["MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}], "clip": ["CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}], "lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", 
"aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"tooltip": "The name of the LoRA."}], "strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}], "strength_clip": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. 
This value can be negative."}]}, "optional": {"prompt": ["STRING", {"hidden": true}]}}, "input_order": {"required": ["model", "clip", "lora_name", "strength_model", "strength_clip"], "optional": ["prompt"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "STRING"], "output_is_list": [false, false, false], "output_name": ["MODEL", "CLIP", "example"], "name": "LoraLoader|pysssss", "display_name": "Lora Loader \ud83d\udc0d", "description": "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "The modified CLIP model."], "search_aliases": ["lora", "load lora", "apply lora", "lora loader", "lora model"], "essentials_category": "Image Generation"}, "CheckpointLoader|pysssss": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", 
"flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", 
"sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", 
"sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], {"tooltip": "The name of the checkpoint (model) to load."}]}, "optional": {"prompt": ["STRING", {"hidden": true}]}}, "input_order": {"required": ["ckpt_name"], "optional": ["prompt"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE", "STRING"], "output_is_list": [false, false, false, false], "output_name": ["MODEL", "CLIP", 
"VAE", "example"], "name": "CheckpointLoader|pysssss", "display_name": "Checkpoint Loader \ud83d\udc0d", "description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "loaders", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The model used for denoising latents.", "The CLIP model used for encoding text prompts.", "The VAE model used for encoding and decoding images to and from latent space."], "search_aliases": ["load model", "checkpoint", "model loader", "load checkpoint", "ckpt", "model"]}, "ConstrainImage|pysssss": {"input": {"required": {"images": ["IMAGE"], "max_width": ["INT", {"default": 1024, "min": 0}], "max_height": ["INT", {"default": 1024, "min": 0}], "min_width": ["INT", {"default": 0, "min": 0}], "min_height": ["INT", {"default": 0, "min": 0}], "crop_if_required": [["yes", "no"], {"default": "no"}]}}, "input_order": {"required": ["images", "max_width", "max_height", "min_width", "min_height", "crop_if_required"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "ConstrainImage|pysssss", "display_name": "Constrain Image \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConstrainImageforVideo|pysssss": {"input": {"required": {"images": ["IMAGE"], "max_width": ["INT", {"default": 1024, "min": 0}], "max_height": ["INT", {"default": 1024, "min": 0}], "min_width": ["INT", {"default": 0, "min": 0}], "min_height": ["INT", {"default": 0, "min": 0}], "crop_if_required": [["yes", "no"], {"default": "no"}]}}, "input_order": {"required": ["images", "max_width", "max_height", "min_width", "min_height", "crop_if_required"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": 
"ConstrainImageforVideo|pysssss", "display_name": "Constrain Image for Video \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MathExpression|pysssss": {"input": {"required": {"expression": ["STRING", {"multiline": true, "dynamicPrompts": false, "pysssss.autocomplete": {"words": [{"text": "round", "value": "round()", "showValue": false, "hint": "number, dp? = 0", "caretOffset": -1}, {"text": "ceil", "value": "ceil()", "showValue": false, "hint": "number", "caretOffset": -1}, {"text": "floor", "value": "floor()", "showValue": false, "hint": "number", "caretOffset": -1}, {"text": "min", "value": "min()", "showValue": false, "hint": "...numbers", "caretOffset": -1}, {"text": "max", "value": "max()", "showValue": false, "hint": "...numbers", "caretOffset": -1}, {"text": "randomint", "value": "randomint()", "showValue": false, "hint": "min, max", "caretOffset": -1}, {"text": "randomchoice", "value": "randomchoice()", "showValue": false, "hint": "...numbers", "caretOffset": -1}, {"text": "sqrt", "value": "sqrt()", "showValue": false, "hint": "number", "caretOffset": -1}, {"text": "int", "value": "int()", "showValue": false, "hint": "number", "caretOffset": -1}, {"text": "iif", "value": "iif()", "showValue": false, "hint": "value, truepart, falsepart", "caretOffset": -1}], "separator": ""}}]}, "optional": {"a": ["*"], "b": ["*"], "c": ["*"]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "prompt": "PROMPT"}}, "input_order": {"required": ["expression"], "optional": ["a", "b", "c"], "hidden": ["extra_pnginfo", "prompt"]}, "is_input_list": false, "output": ["INT", "FLOAT"], "output_is_list": [false, false], "output_name": ["INT", "FLOAT"], "name": "MathExpression|pysssss", "display_name": "Math Expression \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, 
"has_intermediate_output": false, "search_aliases": []}, "PlaySound|pysssss": {"input": {"required": {"any": ["*", {}], "mode": [["always", "on empty queue"], {}], "volume": ["FLOAT", {"min": 0, "max": 1, "step": 0.1, "default": 0.5}], "file": ["STRING", {"default": "notify.mp3"}]}}, "input_order": {"required": ["any", "mode", "volume", "file"]}, "is_input_list": true, "output": ["*"], "output_is_list": [true], "output_name": ["*"], "name": "PlaySound|pysssss", "display_name": "PlaySound \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Repeater|pysssss": {"input": {"required": {"source": ["*", {}], "repeats": ["INT", {"min": 0, "max": 5000, "default": 2}], "output": [["single", "multi"], {}], "node_mode": [["reuse", "create"], {}]}}, "input_order": {"required": ["source", "repeats", "output", "node_mode"]}, "is_input_list": false, "output": ["*"], "output_is_list": [true], "output_name": ["*"], "name": "Repeater|pysssss", "display_name": "Repeater \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ReroutePrimitive|pysssss": {"input": {"required": {"value": ["*"]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ReroutePrimitive|pysssss", "display_name": "Reroute Primitive \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "__hidden__", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ShowText|pysssss": {"input": {"required": {"text": ["STRING", {"forceInput": true}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["text"], "hidden": ["unique_id", "extra_pnginfo"]}, 
"is_input_list": true, "output": ["STRING"], "output_is_list": [true], "output_name": ["STRING"], "name": "ShowText|pysssss", "display_name": "Show Text \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "StringFunction|pysssss": {"input": {"required": {"action": [["append", "replace"], {}], "tidy_tags": [["yes", "no"], {}]}, "optional": {"text_a": ["STRING", {"multiline": true, "dynamicPrompts": false}], "text_b": ["STRING", {"multiline": true, "dynamicPrompts": false}], "text_c": ["STRING", {"multiline": true, "dynamicPrompts": false}]}}, "input_order": {"required": ["action", "tidy_tags"], "optional": ["text_a", "text_b", "text_c"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StringFunction|pysssss", "display_name": "String Function \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SystemNotification|pysssss": {"input": {"required": {"message": ["STRING", {"default": "Your notification has triggered."}], "any": ["*", {}], "mode": [["always", "on empty queue"], {}]}}, "input_order": {"required": ["message", "any", "mode"]}, "is_input_list": true, "output": ["*"], "output_is_list": [true], "output_name": ["*"], "name": "SystemNotification|pysssss", "display_name": "SystemNotification \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "LoadText|pysssss": {"input": {"required": {"root_dir": [["input", "output", "temp"], {}], "file": [["[none]"], {"pysssss.binding": [{"source": "root_dir", "callback": [{"type": "set", "target": "$this.disabled", "value": true}, {"type": "fetch", "url": 
"/pysssss/text-file/{$source.value}", "then": [{"type": "set", "target": "$this.options.values", "value": "$result"}, {"type": "validate-combo"}, {"type": "set", "target": "$this.disabled", "value": false}]}]}]}]}}, "input_order": {"required": ["root_dir", "file"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "LoadText|pysssss", "display_name": "Load Text \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveText|pysssss": {"input": {"required": {"root_dir": [["input", "output", "temp"], {}], "file": ["STRING", {"default": "file.txt"}], "append": [["append", "overwrite", "new only"], {}], "insert": ["BOOLEAN", {"default": true, "label_on": "new line", "label_off": "none", "pysssss.binding": [{"source": "append", "callback": [{"type": "if", "condition": [{"left": "$source.value", "op": "eq", "right": "\"append\""}], "true": [{"type": "set", "target": "$this.disabled", "value": false}], "false": [{"type": "set", "target": "$this.disabled", "value": true}]}]}]}], "text": ["STRING", {"forceInput": true, "multiline": true}]}}, "input_order": {"required": ["root_dir", "file", "append", "insert", "text"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SaveText|pysssss", "display_name": "Save Text \ud83d\udc0d", "description": "", "python_module": "custom_nodes.ComfyUI-Custom-Scripts", "category": "utils", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy showLoaderSettingsNames": {"input": {"required": {"pipe": ["PIPE_LINE"]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["pipe"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "STRING", "STRING"], "output_is_list": [false, false, 
false], "output_name": ["ckpt_name", "vae_name", "lora_name"], "name": "easy showLoaderSettingsNames", "display_name": "Show Loader Settings Names", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy sliderControl": {"input": {"required": {"mode": [["ipadapter layer weights"]], "model_type": [["sdxl", "sd1"]]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["mode", "model_type"], "hidden": ["prompt", "my_unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["layer_weights"], "name": "easy sliderControl", "display_name": "Easy Slider Control", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ckptNames": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", 
"flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", 
"sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", 
"sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]]}}, "input_order": {"required": ["ckpt_name"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["ckpt_name"], "name": "easy ckptNames", "display_name": "Ckpt Names", 
"description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy controlnetNames": {"input": {"required": {"controlnet_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", 
"flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", 
"sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", 
"sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}}, "input_order": {"required": ["controlnet_name"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["controlnet_name"], "name": "easy controlnetNames", "display_name": "ControlNet Names", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": 
"EasyUse/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loraNames": {"input": {"required": {"lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", 
"flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", 
"qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", 
"sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]]}}, "input_order": {"required": ["lora_name"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["lora_name"], "name": "easy loraNames", "display_name": "Lora Names", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy tableEditor": {"input": {"required": {"table_data": ["EASY_TABLE_EDITOR"]}}, "input_order": {"required": ["table_data"]}, "is_input_list": false, "output": ["STRING", "IMAGE"], "output_is_list": [false, false], "output_name": ["markdown", "image"], "name": "easy tableEditor", "display_name": "Table Editor", "description": "\u901a\u8fc7\u53ef\u89c6\u5316\u8868\u683c\u6216 Markdown \u8bed\u6cd5\u7f16\u8f91\u6570\u636e\uff0c\u8f93\u51fa Markdown \u683c\u5f0f\u7684\u8868\u683c\u5b57\u7b26\u4e32\u3002", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy seed": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["seed"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["INT"], "output_is_list": 
[false], "output_name": ["seed"], "name": "easy seed", "display_name": "EasySeed", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Seed", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy seedList": {"input": {"required": {"min_num": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}], "max_num": ["INT", {"default": 1125899906842624, "max": 1125899906842624, "min": 0}], "method": [["random", "increment", "decrement"], {"default": "random"}], "total": ["INT", {"default": 1, "min": 1, "max": 100000}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["min_num", "max_num", "method", "total", "seed"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["seed", "total"], "name": "easy seedList", "display_name": "EasySeedList", "description": "Random number seed that can be used in a for loop, by connecting index and easy indexAny node to realize different seed values in the loop.", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Seed", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy globalSeed": {"input": {"required": {"value": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}], "mode": ["BOOLEAN", {"default": true, "label_on": "control_before_generate", "label_off": "control_after_generate"}], "action": [["fixed", "increment", "decrement", "randomize", "increment for each node", "decrement for each node", "randomize for each node"]], "last_seed": ["STRING", {"default": ""}]}}, "input_order": {"required": ["value", "mode", "action", "last_seed"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "easy globalSeed", "display_name": 
"EasyGlobalSeed", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Seed", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy positive": {"input": {"required": {"positive": ["STRING", {"default": "", "multiline": true, "placeholder": "Positive"}]}}, "input_order": {"required": ["positive"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["positive"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy positive", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy negative": {"input": {"required": {"negative": ["STRING", {"default": "", "multiline": true, "placeholder": "Negative"}]}}, "input_order": {"required": ["negative"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["negative"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy negative", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy wildcards": {"input": {"required": {"text": ["STRING", {"default": "", "multiline": true, "placeholder": "(Support wildcard)", "dynamicPrompts": false}], "Select to add LoRA": ["COMBO", {"multiselect": false, "options": ["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", 
"Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", 
"flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", 
"sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]}], "Select to add Wildcard": ["COMBO", {"multiselect": false, "options": ["Select the Wildcard to add to the text", "__example__"]}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}], "multiline_mode": ["BOOLEAN", {"default": false}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["text", "Select to add LoRA", "Select to add Wildcard", "seed", "multiline_mode"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["STRING", "STRING"], "output_is_list": [true, true], "output_name": ["text", "populated_text"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy wildcards", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy wildcardsMatrix": {"input": {"required": {"text": ["STRING", {"default": "", "multiline": true, "placeholder": "(Support Lora Block Weight and wildcard)", "dynamicPrompts": false}], "Select to add LoRA": ["COMBO", {"multiselect": false, "options": ["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", 
"Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", 
"flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", 
"sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]}], "Select to add Wildcard": ["COMBO", 
{"multiselect": false, "options": ["Select the Wildcard to add to the text", "__example__"]}], "offset": ["INT", {"default": 0, "min": 0, "max": 1125899906842624, "step": 1, "control_after_generate": true}]}, "optional": {"output_limit": ["INT", {"tooltip": "Output All Probilities", "default": 1, "min": -1, "step": 1}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["text", "Select to add LoRA", "Select to add Wildcard", "offset"], "optional": ["output_limit"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["STRING", "INT", "INT"], "output_is_list": [true, false, true], "output_name": ["populated_text", "total", "factors"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "easy wildcardsMatrix", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy prompt": {"input": {"required": {"text": ["STRING", {"default": "", "multiline": true, "placeholder": "Prompt"}], "prefix": ["COMBO", {"default": "Select the prefix add to the text", "multiselect": false, "options": ["Select the prefix add to the text", "Detailed photo of", "Amateur photo of", "Flicker 2008 photo of", "Fantastic artwork of", "Vintage photograph of", "Unreal 5 render of", "Surrealist painting of", "Professional advertising design of"]}], "subject": ["COMBO", {"default": "\ud83d\udc64Select the subject add to the text", "multiselect": false, "options": ["\ud83d\udc64Select the subject add to the text", "a man", "a woman", "a young man", "a young woman", "a handsome man", "a beautiful woman", "a monster", "a toy", "a product", "a buddha", "a dog", "a cat"]}], "action": ["COMBO", 
{"default": "\ud83c\udfacSelect the action add to the text", "multiselect": false, "options": ["\ud83c\udfacSelect the action add to the text", "looking at viewer", "looking away", "looking up", "looking down", "looking back", "open mouth", "half-closed mouth", "closed mouth", "open eyes", "half-closed eyes", "closed eyes", "wink", "standing", "sitting", "lying", "walking", "running", "adjusting hair", "waving", "hand on hip", "crossed arms", "smile", "sad", "angry", "sleepy", "tired", "expressionless"]}], "clothes": ["COMBO", {"default": "\ud83d\udc5aSelect the clothes add to the text", "multiselect": false, "options": ["\ud83d\udc5aSelect the clothes add to the text", "underwear", "clothed", "casual", "dress", "swimsuit", "uniform", "bikini", "one-piece swimsuit", "shirt", "blouse", "sweater", "hoodie", "jeans", "pants", "shorts", "skirt", "vest", "coat", "trenchoat", "jacket", "short dress", "long dress", "off-shoulder", "backless", "hairbow", "hair ribbon", "hair tie", "hairband", "cap", "beanie", "bucket hat", "sun hat", "straw hat", "rice hat", "witch hat", "crown", "chain necklace", "tooth necklace", "choker", "pendant", "bracelet", "watch", "ring", "earring", "anklet", "belt", "scarf", "gloves", "mittens", "socks", "stockings", "tights", "leggings", "boots", "sneakers", "heels", "sandals", "flip-flops", "slippers", "loafers", "mules", "oxfords", "brogues", "derbies", "monk shoes", "chelsea boots", "combat boots", "riding boots", "rain boots", "wedge heels", "platform heels", "stilettos", "block heels", "kitten heels", "moccasins", "espadrilles", "pumps", "flats", "ballet flats", "mary janes", "slingbacks", "peep-toe", "mule sandals", "gladiator sandals", "thong sandals", "slide sandals", "espadrille sandals", "wedge sandals", "platform sandals", "ankle boots", "knee-high boots", "over-the-knee boots", "thigh-high boots", "wellington boots", "chukka boots", "desert boots", "chelsea boots", "hiking boots", "work boots", "snow boots", "rain boots", "riding 
boots", "cowboy boots", "combat boots", "biker boots", "duck boots", "military boots", "western boots", "ankle strap heels", "block heels", "chunky heels", "cone heels", "kitten heels", "platform heels", "pumps", "slingback heels", "stiletto heels", "wedge heels", "mules", "slingbacks", "slides", "thong sandals", "gladiator sandals", "espadrilles", "wedge sandals", "platform sandals", "ankle boots", "knee-high boots", "over-the-knee boots", "thigh-high boots", "wellington boots", "chukka boots", "desert boots", "chelsea boots", "hiking boots", "work boots", "snow boots", "rain boots", "riding boots", "cowboy boots", "combat boots", "biker boots", "duck boots", "military boots", "western boots", "ankle strap heels", "block heels"]}], "environment": ["COMBO", {"default": "\u2600\ufe0fSelect the illumination environment add to the text", "multiselect": false, "options": ["\u2600\ufe0fSelect the illumination environment add to the text", "sunshine from window", "neon night, city", "sunset over sea", "golden time", "sci-fi RGB glowing, cyberpunk", "natural lighting", "warm atmosphere, at home, bedroom", "magic lit", "evil, gothic, in a cave", "light and shadow", "shadow from window", "soft studio lighting", "home atmosphere, cozy bedroom illumination", "neon, Wong Kar-wai, warm", "moonlight through curtains", "stormy sky lighting", "underwater glow, deep sea", "foggy forest at dawn", "golden hour in a meadow", "rainbow reflections, neon", "cozy candlelight", "apocalyptic, smoky atmosphere", "red glow, emergency lights", "mystical glow, enchanted forest", "campfire light", "harsh, industrial lighting", "sunrise in the mountains", "evening glow in the desert", "moonlight in a dark alley", "golden glow at a fairground", "midnight in the forest", "purple and pink hues at twilight", "foggy morning, muted light", "candle-lit room, rustic vibe", "fluorescent office lighting", "lightning flash in storm", "night, cozy warm light from fireplace", "ethereal glow, magical forest", 
"dusky evening on a beach", "afternoon light filtering through trees", "blue neon light, urban street", "red and blue police lights in rain", "aurora borealis glow, arctic landscape", "sunrise through foggy mountains", "golden hour on a city skyline", "mysterious twilight, heavy mist", "early morning rays, forest clearing", "colorful lantern light at festival", "soft glow through stained glass", "harsh spotlight in dark room", "mellow evening glow on a lake", "crystal reflections in a cave", "vibrant autumn lighting in a forest", "gentle snowfall at dusk", "hazy light of a winter morning", "soft, diffused foggy glow", "underwater luminescence", "rain-soaked reflections in city lights", "golden sunlight streaming through trees", "fireflies lighting up a summer night", "glowing embers from a forge", "dim candlelight in a gothic castle", "midnight sky with bright starlight", "warm sunset in a rural village", "flickering light in a haunted house", "desert sunset with mirage-like glow", "golden beams piercing through storm clouds"]}], "background": ["COMBO", {"default": "\ud83c\udf9e\ufe0fSelect the background add to the text", "multiselect": false, "options": ["\ud83c\udf9e\ufe0fSelect the background add to the text", "cars and people", "a cozy bed and a lamp", "a forest clearing with mist", "a bustling marketplace", "a quiet beach at dusk", "an old, cobblestone street", "a futuristic cityscape", "a tranquil lake with mountains", "a mysterious cave entrance", "bookshelves and plants in the background", "an ancient temple in ruins", "tall skyscrapers and neon signs", "a starry sky over a desert", "a bustling caf\u00e9", "rolling hills and farmland", "a modern living room with a fireplace", "an abandoned warehouse", "a picturesque mountain range", "a starry night sky", "the interior of a futuristic spaceship", "the cluttered workshop of an inventor", "the glowing embers of a bonfire", "a misty lake surrounded by trees", "an ornate palace hall", "a busy street market", "a 
vast desert landscape", "a peaceful library corner", "bustling train station", "a mystical, enchanted forest", "an underwater reef with colorful fish", "a quiet rural village", "a sandy beach with palm trees", "a vibrant coral reef, teeming with life", "snow-capped mountains in distance", "a stormy ocean, waves crashing", "a rustic barn in open fields", "a futuristic lab with glowing screens", "a dark, abandoned castle", "the ruins of an ancient civilization", "a bustling urban street in rain", "an elegant grand ballroom", "a sprawling field of wildflowers", "a dense jungle with sunlight filtering through", "a dimly lit, vintage bar", "an ice cave with sparkling crystals", "a serene riverbank at sunset", "a narrow alley with graffiti walls", "a peaceful zen garden with koi pond", "a high-tech control room", "a quiet mountain village at dawn", "a lighthouse on a rocky coast", "a rainy street with flickering lights", "a frozen lake with ice formations", "an abandoned theme park", "a small fishing village on a pier", "rolling sand dunes in a desert", "a dense forest with towering redwoods", "a snowy cabin in the mountains", "a mystical cave with bioluminescent plants", "a castle courtyard under moonlight", "a bustling open-air night market", "an old train station with steam", "a tranquil waterfall surrounded by trees", "a vineyard in the countryside", "a quaint medieval village", "a bustling harbor with boats", "a high-tech futuristic mall", "a lush tropical rainforest"]}], "nsfw": ["COMBO", {"default": "\ud83d\udd1e\ufe0fSelect the nsfw add to the text", "multiselect": false, "options": ["\ud83d\udd1eSelect the nsfw add to the text", "nude", "breast", "small breast", "middle breast", "large breast", "nipples", "clothes lift", "pussy juice trail", "pussy juice puddle", "small testicles", "medium testicles", "large testicles", "disembodied penis", "cum on body", "cum inside", "cum outside", "fingering", "handjob", "fellatio", "licking penis", "paizuri", "doggystyle", 
"cowgirl", "reversed cowgirl", "piledriver", "suspended congress", "full nelson"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["text", "prefix", "subject", "action", "clothes", "environment", "background", "nsfw"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["prompt"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy prompt", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy promptList": {"input": {"required": {"prompt_1": ["STRING", {"default": "", "multiline": true}], "prompt_2": ["STRING", {"default": "", "multiline": true}], "prompt_3": ["STRING", {"default": "", "multiline": true}], "prompt_4": ["STRING", {"default": "", "multiline": true}], "prompt_5": ["STRING", {"default": "", "multiline": true}]}, "optional": {"optional_prompt_list": ["LIST", {}]}}, "input_order": {"required": ["prompt_1", "prompt_2", "prompt_3", "prompt_4", "prompt_5"], "optional": ["optional_prompt_list"]}, "is_input_list": false, "output": ["LIST", "STRING"], "output_is_list": [false, true], "output_name": ["prompt_list", "prompt_strings"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy promptList", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy promptLine": {"input": {"required": 
{"prompt": ["STRING", {"default": "text", "multiline": true}], "start_index": ["INT", {"default": 0, "min": 0, "max": 9999}], "max_rows": ["INT", {"default": 1000, "min": 1, "max": 9999}], "remove_empty_lines": ["BOOLEAN", {"default": true}]}, "hidden": {"prompt": ["PROMPT"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["prompt", "start_index", "max_rows", "remove_empty_lines"], "hidden": ["prompt", "unique_id"]}, "is_input_list": false, "output": ["STRING", "COMBO"], "output_is_list": [true, true], "output_name": ["STRING", "COMBO"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy promptLine", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy promptAwait": {"input": {"required": {"now": ["*", {}], "prompt": ["STRING", {"default": "", "multiline": true, "placeholder": "Enter a prompt or use voice to enter to text"}], "toolbar": ["EASY_PROMPT_AWAIT_BAR", {}]}, "optional": {"prev": ["*", {}]}, "hidden": {"prompt": ["PROMPT"], "unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["now", "prompt", "toolbar"], "optional": ["prev"], "hidden": ["prompt", "unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["*", "STRING", "BOOLEAN", "INT"], "output_is_list": [false, false, false, false], "output_name": ["output", "prompt", "continue", "seed"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "easy promptAwait", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, 
"search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy promptConcat": {"input": {"required": {}, "optional": {"prompt1": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "prompt2": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "separator": ["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": [], "optional": ["prompt1", "prompt2", "separator"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["prompt"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy promptConcat", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy promptReplace": {"input": {"required": {"prompt": ["STRING", {"default": "", "forceInput": true, "multiline": true}]}, "optional": {"find1": ["STRING", {"default": "", "multiline": false}], "replace1": ["STRING", {"default": "", "multiline": false}], "find2": ["STRING", {"default": "", "multiline": false}], "replace2": ["STRING", {"default": "", "multiline": false}], "find3": ["STRING", {"default": "", "multiline": false}], "replace3": ["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": ["prompt"], "optional": ["find1", "replace1", "find2", "replace2", "find3", "replace3"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["prompt"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy promptReplace", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, 
"price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy stylesSelector": {"input": {"required": {"styles": ["COMBO", {"default": "fooocus_styles", "multiselect": false, "options": ["fooocus_styles"]}]}, "optional": {"positive": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "negative": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "select_styles": ["EASY_PROMPT_STYLES", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["styles"], "optional": ["positive", "negative", "select_styles"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["STRING", "STRING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy stylesSelector", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy portraitMaster": {"input": {"required": {"shot": ["COMBO", {"multiselect": false, "options": ["-", "Head portrait", "Head and shoulders portrait", "Half-length portrait", "Full-length portrait", "Face", "Portrait", "Full body", "Close-up"]}], "shot_weight": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "gender": ["COMBO", {"default": "Woman", "multiselect": false, "options": ["-", "Man", "Woman"]}], "age": ["INT", {"default": 30, "min": 18, "max": 90, "step": 1, "display": "slider"}], "nationality_1": ["COMBO", {"default": "Chinese", "multiselect": false, "options": ["-", "Afghan", "Albanian", "Algerian", "Andorran", "Angolan", "Antiguans Barbudans", 
"Argentine", "Armenian", "Australian", "Austrian", "Azerbaijani", "Bahamian", "Bahraini", "Bangladeshi", "Barbadian", "Belarusian", "Belgian", "Belizean", "Beninese", "Bhutanese", "Bolivian", "Bosnian Herzegovinian", "Brazilian", "British", "Bruneian", "Bulgarian", "Burkinabe", "Burundian", "Cambodian", "Cameroonian", "Canadian", "Cape Verdian", "Central African", "Chadian", "Chilean", "Chinese", "Colombian", "Comoran", "Congolese", "Costa Rican", "Croatian", "Cuban", "Cypriot", "Czech", "Danish", "Djibouti", "Dominican", "Dutch", "East Timorese", "Ecuadorean", "Egyptian", "Emirian", "Equatorial Guinean", "Eritrean", "Estonian", "Ethiopian", "Fijian", "Filipino", "Finnish", "French", "Gabonese", "Gambian", "Georgian", "German", "Ghanaian", "Greek", "Grenadian", "Guatemalan", "Guinean", "Guyanese", "Haitian", "Herzegovinian", "Honduran", "Hungarian", "Icelander", "Indian", "Indonesian", "Iranian", "Iraqi", "Irish", "Israeli", "Italian", "Ivorian", "Jamaican", "Japanese", "Jordanian", "Kazakhstani", "Kenyan", "Kiribati", "North Korean", "South Korean", "Kuwaiti", "Kyrgyz", "Laotian", "Latvian", "Lebanese", "Liberian", "Libyan", "Liechtensteiner", "Lithuanian", "Luxembourgish", "Macedonian", "Malagasy", "Malawian", "Malaysian", "Maldivan", "Malian", "Maltese", "Marshallese", "Mauritanian", "Mauritian", "Mexican", "Micronesian", "Moldovan", "Monegasque", "Mongolian", "Montenegrin", "Moroccan", "Mosotho", "Motswana", "Mozambican", "Namibian", "Nauruan", "Nepalese", "New Zealander", "Ni-Vanuatu", "Nicaraguan", "Nigerian", "Nigerien", "Northern Irish", "Norwegian", "Omani", "Pakistani", "Palauan", "Palestinian", "Panamanian", "Papua New Guinean", "Paraguayan", "Peruvian", "Polish", "Portuguese", "Qatari", "Romanian", "Russian", "Rwandan", "Saint Lucian", "Salvadoran", "Samoan", "San Marinese", "Sao Tomean", "Saudi", "Scottish", "Senegalese", "Serbian", "Seychellois", "Sierra Leonean", "Singaporean", "Slovakian", "Slovenian", "Solomon Islander", "Somali", 
"South African", "South Sudanese", "Spanish", "Sri Lankan", "Sudanese", "Surinamer", "Swazi", "Swedish", "Swiss", "Syrian", "Tajikistani", "Tanzanian", "Thai", "Togolese", "Tongan", "Trinidadian Tobagonian", "Tunisian", "Turkish", "Turkmen", "Tuvaluan", "Ugandan", "Ukrainian", "Uruguayan", "Uzbekistani", "Venezuelan", "Vietnamese", "Welsh", "Yemeni", "Zambian", "Zimbabwean"]}], "nationality_2": ["COMBO", {"multiselect": false, "options": ["-", "Afghan", "Albanian", "Algerian", "Andorran", "Angolan", "Antiguans Barbudans", "Argentine", "Armenian", "Australian", "Austrian", "Azerbaijani", "Bahamian", "Bahraini", "Bangladeshi", "Barbadian", "Belarusian", "Belgian", "Belizean", "Beninese", "Bhutanese", "Bolivian", "Bosnian Herzegovinian", "Brazilian", "British", "Bruneian", "Bulgarian", "Burkinabe", "Burundian", "Cambodian", "Cameroonian", "Canadian", "Cape Verdian", "Central African", "Chadian", "Chilean", "Chinese", "Colombian", "Comoran", "Congolese", "Costa Rican", "Croatian", "Cuban", "Cypriot", "Czech", "Danish", "Djibouti", "Dominican", "Dutch", "East Timorese", "Ecuadorean", "Egyptian", "Emirian", "Equatorial Guinean", "Eritrean", "Estonian", "Ethiopian", "Fijian", "Filipino", "Finnish", "French", "Gabonese", "Gambian", "Georgian", "German", "Ghanaian", "Greek", "Grenadian", "Guatemalan", "Guinean", "Guyanese", "Haitian", "Herzegovinian", "Honduran", "Hungarian", "Icelander", "Indian", "Indonesian", "Iranian", "Iraqi", "Irish", "Israeli", "Italian", "Ivorian", "Jamaican", "Japanese", "Jordanian", "Kazakhstani", "Kenyan", "Kiribati", "North Korean", "South Korean", "Kuwaiti", "Kyrgyz", "Laotian", "Latvian", "Lebanese", "Liberian", "Libyan", "Liechtensteiner", "Lithuanian", "Luxembourgish", "Macedonian", "Malagasy", "Malawian", "Malaysian", "Maldivan", "Malian", "Maltese", "Marshallese", "Mauritanian", "Mauritian", "Mexican", "Micronesian", "Moldovan", "Monegasque", "Mongolian", "Montenegrin", "Moroccan", "Mosotho", "Motswana", "Mozambican", 
"Namibian", "Nauruan", "Nepalese", "New Zealander", "Ni-Vanuatu", "Nicaraguan", "Nigerian", "Nigerien", "Northern Irish", "Norwegian", "Omani", "Pakistani", "Palauan", "Palestinian", "Panamanian", "Papua New Guinean", "Paraguayan", "Peruvian", "Polish", "Portuguese", "Qatari", "Romanian", "Russian", "Rwandan", "Saint Lucian", "Salvadoran", "Samoan", "San Marinese", "Sao Tomean", "Saudi", "Scottish", "Senegalese", "Serbian", "Seychellois", "Sierra Leonean", "Singaporean", "Slovakian", "Slovenian", "Solomon Islander", "Somali", "South African", "South Sudanese", "Spanish", "Sri Lankan", "Sudanese", "Surinamer", "Swazi", "Swedish", "Swiss", "Syrian", "Tajikistani", "Tanzanian", "Thai", "Togolese", "Tongan", "Trinidadian Tobagonian", "Tunisian", "Turkish", "Turkmen", "Tuvaluan", "Ugandan", "Ukrainian", "Uruguayan", "Uzbekistani", "Venezuelan", "Vietnamese", "Welsh", "Yemeni", "Zambian", "Zimbabwean"]}], "nationality_mix": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "body_type": ["COMBO", {"multiselect": false, "options": ["-", "Underweight", "Normal weight", "Overweight", "Obese"]}], "body_type_weight": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "model_pose": ["COMBO", {"multiselect": false, "options": ["-", "Power Pose", "Walking Pose", "The Over-the-Shoulder Look", "S-curve Pose", "Sitting Pose", "Close-Up Beauty Shot Pose", "Leaning Pose", "Arms Up Pose", "Casual Stroll Pose", "Headshot Pose", "Sitting Cross-Legged Pose", "Back Arch Pose", "Hand-on-Hip Pose", "Gazing into the Distance Pose", "Candid Laugh Pose", "Dynamic Action Pose", "Contrapposto Pose", "High Fashion Pose"]}], "eyes_color": ["COMBO", {"multiselect": false, "options": ["-", "Brown", "Blue", "Green", "Hazel", "Gray", "Amber", "Red", "Violet"]}], "facial_expression": ["COMBO", {"multiselect": false, "options": ["-", "Happy", "Sad", "Angry", "Surprised", "Fearful", "Disgusted", 
"Contemptuous", "Excited", "Nervous", "Confused", "Amused", "Content", "Disappointed", "Bored", "Relieved", "In love", "Shy", "Envious", "Proud", "Cautious", "Serious", "Serene", "Peaceful", "Calm"]}], "facial_expression_weight": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "face_shape": ["COMBO", {"multiselect": false, "options": ["-", "Oval", "Round", "Square", "Heart-shaped", "Long", "Rectangle", "Triangle", "Inverted Triangle", "Pear-shaped", "Oblong", "Square Round", "Square Oval"]}], "face_shape_weight": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "facial_asymmetry": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "hair_style": ["COMBO", {"multiselect": false, "options": ["-", "Asymmetrical cut", "Blunt cut", "Bob cut", "Braided bob", "Buzz cut", "Choppy cut", "Curly bob", "Curtain bangs", "Faux hawk", "Feathered cut", "French bob", "Layered cut", "Long bob", "Mohawk", "Pixie cut", "Shag cut", "Side-swept bangs", "Textured cut", "Undercut", "Wavy bob", "Faux hawk short pixie", "Brave short haircut with shaved sides", "Tapered haricut wuth shaved side", "Stacked bob", "Lemonade braids", "Middle part ponytails", "Stitch braids", "Deep side part", "French braids", "Box braids", "Two dutch braids", "Wavy cut with curtains bangs", "Right side shaved", "Sweeping pixie", "Smooth lob", "Long pixie", "Sideswept pixie", "Italian bob", "Shullet"]}], "hair_color": ["COMBO", {"multiselect": false, "options": ["-", "Black", "Brown", "Blonde", "Red", "Auburn", "Chestnut", "Gray", "White", "Salt and pepper"]}], "disheveled": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "beard": ["COMBO", {"multiselect": false, "options": ["-", "Stubble Beard", "Goatee", "Full Beard", "Van Dyke Beard", "Soul Patch", "Garibaldi Beard", "Mutton Chops", "Circle Beard", "Corporate Beard", "Balbo Beard", "Ducktail Beard", "Chinstrap Beard", 
"Anchor Beard", "Chevron Mustache", "Horseshoe Mustache", "Handlebar Mustache", "Imperial Mustache", "Pencil Mustache", "Friendly Mutton Chops", "Zappa Mustache"]}], "skin_details": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "skin_pores": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "dimples": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "freckles": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "moles": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "skin_imperfections": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "skin_acne": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "tanned_skin": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "eyes_details": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "iris_details": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "circular_iris": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "circular_pupil": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "light_type": ["COMBO", {"multiselect": false, "options": ["-", "Natural sunlight", "Soft ambient light", "Harsh sunlight", "Overcast sky", "Sunset glow", "Sunrise warmth", "Twilight hues", "Candlelight", "Incandescent lighting", "Fluorescent lighting", "Moonlight", "Dappled sunlight", "Backlit silhouette", "Spotlight", "Rim lighting", "Firelight", "City streetlights", "Studio lighting", "Lantern light", "Tungsten lighting", "Cloudy day diffused light", "Skylight", "Golden hour light", "Blue hour light", "Flash photography", "Stage lighting", "Neon lights", "Torchlight", "Softbox lighting", "Rim light", "Lightning", "Abstract 
light patterns"]}], "light_direction": ["COMBO", {"multiselect": false, "options": ["-", "top", "bottom", "right", "left", "front", "rear", "top-right", "top-left", "bottom-right", "bottom-left"]}], "light_weight": ["FLOAT", {"default": 0, "min": 0, "max": 1.95, "step": 0.05, "display": "slider"}], "photorealism_improvement": ["COMBO", {"multiselect": false, "options": ["enable", "disable"]}], "prompt_start": ["STRING", {"default": "raw photo, (realistic:1.5)", "multiline": true}], "prompt_additional": ["STRING", {"default": "", "multiline": true}], "prompt_end": ["STRING", {"default": "", "multiline": true}], "negative_prompt": ["STRING", {"default": "", "multiline": true}]}}, "input_order": {"required": ["shot", "shot_weight", "gender", "age", "nationality_1", "nationality_2", "nationality_mix", "body_type", "body_type_weight", "model_pose", "eyes_color", "facial_expression", "facial_expression_weight", "face_shape", "face_shape_weight", "facial_asymmetry", "hair_style", "hair_color", "disheveled", "beard", "skin_details", "skin_pores", "dimples", "freckles", "moles", "skin_imperfections", "skin_acne", "tanned_skin", "eyes_details", "iris_details", "circular_iris", "circular_pupil", "light_type", "light_direction", "light_weight", "photorealism_improvement", "prompt_start", "prompt_additional", "prompt_end", "negative_prompt"]}, "is_input_list": false, "output": ["STRING", "STRING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy portraitMaster", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy multiAngle": {"input": {"required": {}, "optional": {"multi_angle": 
["EASY_MULTI_ANGLE", {}]}}, "input_order": {"required": [], "optional": ["multi_angle"]}, "is_input_list": false, "output": ["STRING", "EASY_MULTI_ANGLE"], "output_is_list": [true, false], "output_name": ["prompt", "params"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy multiAngle", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Prompt", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy fullLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", 
"flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", 
"sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", 
"sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors", "None"]], "config_name": [["Default", "anything_v3.yaml", "v1-inference.yaml", "v1-inference_clip_skip_2.yaml", "v1-inference_clip_skip_2_fp16.yaml", "v1-inference_fp16.yaml", "v1-inpainting-inference.yaml", "v2-inference-v.yaml", "v2-inference-v_fp32.yaml", "v2-inference.yaml", "v2-inference_fp32.yaml", "v2-inpainting-inference.yaml"], {"default": "Default"}], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", 
"Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "clip_skip": ["INT", {"default": -2, "min": -24, "max": 0, "step": 1}], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", 
"Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", 
"flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", 
"sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "512 x 512"}], "empty_latent_width": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "positive_token_normalization": [["none", "mean", "length", "length+mean"]], "positive_weight_interpretation": [["comfy", "A1111", "comfy++", "compel", "fixed attention"]], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "negative_token_normalization": [["none", "mean", "length", "length+mean"]], "negative_weight_interpretation": [["comfy", "A1111", "comfy++", "compel", "fixed attention"]], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}]}, "optional": {"model_override": ["MODEL"], "clip_override": ["CLIP"], "vae_override": ["VAE"], "optional_lora_stack": ["LORA_STACK"], "optional_controlnet_stack": ["CONTROL_NET_STACK"], "a1111_prompt_style": ["BOOLEAN", {"default": false}]}, "hidden": {"video_length": "INT", "prompt": 
"PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "config_name", "vae_name", "clip_skip", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "positive_token_normalization", "positive_weight_interpretation", "negative", "negative_token_normalization", "negative_weight_interpretation", "batch_size"], "optional": ["model_override", "clip_override", "vae_override", "optional_lora_stack", "optional_controlnet_stack", "a1111_prompt_style"], "hidden": ["video_length", "prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE", "CLIP", "CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["pipe", "model", "vae", "clip", "positive", "negative", "latent"], "name": "easy fullLoader", "display_name": "EasyLoader (Full)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy a1111Loader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", 
"epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", 
"sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", 
"sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", 
"SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "clip_skip": ["INT", {"default": -2, "min": -24, "max": 0, "step": 1}], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", 
"Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", 
"flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", 
"sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "512 x 512"}], "empty_latent_width": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "optional_controlnet_stack": ["CONTROL_NET_STACK"], "a1111_prompt_style": ["BOOLEAN", {"default": false}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "clip_skip", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "negative", "batch_size"], "optional": ["optional_lora_stack", "optional_controlnet_stack", 
"a1111_prompt_style"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy a1111Loader", "display_name": "EasyLoader (A1111)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy comfyLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", 
"cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", 
"illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", 
"sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", 
"sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", 
"hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "clip_skip": ["INT", {"default": -2, "min": -24, "max": 0, "step": 1}], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", 
"Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", 
"gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", 
"sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 
1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "512 x 512"}], "empty_latent_width": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "optional_controlnet_stack": ["CONTROL_NET_STACK"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "clip_skip", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "negative", "batch_size"], "optional": ["optional_lora_stack", "optional_controlnet_stack"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy comfyLoader", "display_name": "EasyLoader (Comfy)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy svdLoader": {"input": {"required": {"ckpt_name": [["SVD/svd.safetensors", "SVD/svd_xt.safetensors", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors"]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", 
"SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "clip_name": [["None", "EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", 
"ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", 
"umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "init_image": ["IMAGE"], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "1024 x 576"}], "empty_latent_width": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "video_frames": ["INT", {"default": 14, "min": 1, "max": 4096}], "motion_bucket_id": ["INT", {"default": 127, "min": 1, "max": 1023}], "fps": ["INT", {"default": 6, "min": 1, "max": 1024}], "augmentation_level": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}]}, "optional": {"optional_positive": ["STRING", {"default": "", "multiline": true}], "optional_negative": ["STRING", {"default": "", "multiline": true}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "clip_name", "init_image", "resolution", "empty_latent_width", "empty_latent_height", "video_frames", "motion_bucket_id", "fps", "augmentation_level"], "optional": ["optional_positive", "optional_negative"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy svdLoader", "display_name": "EasyLoader (SVD)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", 
"category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy sv3dLoader": {"input": {"required": {"ckpt_name": [[]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", 
"wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "init_image": ["IMAGE"], "empty_latent_width": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 21, "min": 1, "max": 4096}], "interp_easing": [["linear", "ease_in", "ease_out", "ease_in_out"], {"default": "linear"}], "easing_mode": [["azimuth", "elevation", "custom"], {"default": "azimuth"}]}, "optional": {"scheduler": ["STRING", {"default": "", "multiline": true}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "init_image", "empty_latent_width", "empty_latent_height", "batch_size", "interp_easing", "easing_mode"], "optional": ["scheduler"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "STRING"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "interp_log"], "name": "easy sv3dLoader", "display_name": "EasyLoader (SV3D)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy zero123Loader": {"input": {"required": {"ckpt_name": [[]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", 
"flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "init_image": ["IMAGE"], "empty_latent_width": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}], "elevation": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}], "azimuth": ["FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "init_image", "empty_latent_width", "empty_latent_height", "batch_size", "elevation", "azimuth"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, 
"output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy zero123Loader", "display_name": "EasyLoader (Zero123)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy cascadeLoader": {"input": {"required": {"stage_c": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", 
"Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-dev.sft", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux1-schnell.sft", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan3d-dit-v2-1/model.fp16.ckpt", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", 
"hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", 
"sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", 
"wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", 
"Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", 
"dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", 
"sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", 
"sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", 
"upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "stage_b": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", 
"Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", 
"flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-dev.sft", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux1-schnell.sft", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan3d-dit-v2-1/model.fp16.ckpt", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", 
"hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", 
"svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", 
"wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", 
"SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", 
"dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", 
"sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", 
"sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", 
"v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "stage_a": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", 
"wan_alpha_2.1_vae_rgb_channel.safetensors"]], "clip_name": [["None", "EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", 
"t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", 
"flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", 
"kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", 
"sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "1024 x 1024"}], "empty_latent_width": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 1024, "min": 16, "max": 16384, "step": 8}], "compression": ["INT", {"default": 42, "min": 32, "max": 64, "step": 1}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": 
true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}, "optional": {"optional_lora_stack": ["LORA_STACK"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["stage_c", "stage_b", "stage_a", "clip_name", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "compression", "positive", "negative", "batch_size"], "optional": ["optional_lora_stack"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "LATENT", "VAE"], "output_is_list": [false, false, false, false], "output_name": ["pipe", "model_c", "latent_c", "vae"], "name": "easy cascadeLoader", "display_name": "EasyCascadeLoader", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy kolorsLoader": {"input": {"required": {"unet_name": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", 
"FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", 
"firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-dev.sft", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux1-schnell.sft", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan3d-dit-v2-1/model.fp16.ckpt", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", 
"hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", 
"qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", 
"wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", 
"xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"]], "vae_name": [["FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", 
"wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "chatglm3_name": [["Florence-2-Flux-Large/model.safetensors", "Florence-2-SD3-Captioner/model.safetensors", "Florence-2-base-PromptGen-v2.0/model.safetensors", "Florence-2-base-ft/pytorch_model.bin", "Florence-2-base/pytorch_model.bin", "Florence-2-large-PromptGen-v1.5/model.safetensors", "Florence-2-large-PromptGen-v2.0/model.safetensors", "Florence-2-large-ft/pytorch_model.bin", "Florence-2-large/pytorch_model.bin", "Florence-2-pixelpros/adapter_model.safetensors", "Llama-3.2-3B-Instruct/model-00001-of-00002.safetensors", "Llama-3.2-3B-Instruct/model-00002-of-00002.safetensors", "Llama-3.2-3B-Instruct/model.safetensors", "Meta-Llama-3.1-8B-Instruct-bnb-4bit/model.safetensors", "OmniGen-v1/model-fp8_e4m3fn.safetensors", "OmniGen-v1/model.safetensors", "OmniGen-v1/vae/diffusion_pytorch_model.safetensors", "checkpoints/chatglm3-4bit.safetensors", "checkpoints/chatglm3-8bit.safetensors", "checkpoints/chatglm3-fp16.safetensors", "llava-llama-3-8b-text-encoder-tokenizer/model-00001-of-00004.safetensors", "llava-llama-3-8b-text-encoder-tokenizer/model-00002-of-00004.safetensors", "llava-llama-3-8b-text-encoder-tokenizer/model-00003-of-00004.safetensors", "llava-llama-3-8b-text-encoder-tokenizer/model-00004-of-00004.safetensors", "llava-llama-3-8b-v1_1-transformers/model-00001-of-00004.safetensors", "llava-llama-3-8b-v1_1-transformers/model-00002-of-00004.safetensors", "llava-llama-3-8b-v1_1-transformers/model-00003-of-00004.safetensors", "llava-llama-3-8b-v1_1-transformers/model-00004-of-00004.safetensors"]], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", 
"Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", 
"flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", 
"sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", 
"wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "1024 x 576"}], "empty_latent_width": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}, "optional": {"model_override": ["MODEL"], "vae_override": ["VAE"], "optional_lora_stack": ["LORA_STACK"], "auto_clean_gpu": ["BOOLEAN", {"default": false}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["unet_name", "vae_name", "chatglm3_name", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "negative", "batch_size"], "optional": ["model_override", "vae_override", "optional_lora_stack", "auto_clean_gpu"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy kolorsLoader", "display_name": "EasyLoader (Kolors)", "description": "", 
"python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy fluxLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", 
"cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", 
"ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", 
"sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", 
"sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors", "None"]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", 
"pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", 
"chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", 
"kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", 
"sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "1024 x 1024"}], "empty_latent_width": ["INT", {"default": 1024, "min": 
64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}, "optional": {"model_override": ["MODEL"], "clip_override": ["CLIP"], "vae_override": ["VAE"], "optional_lora_stack": ["LORA_STACK"], "optional_controlnet_stack": ["CONTROL_NET_STACK"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "batch_size"], "optional": ["model_override", "clip_override", "vae_override", "optional_lora_stack", "optional_controlnet_stack"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy fluxLoader", "display_name": "EasyLoader (Flux)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy hunyuanDiTLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", 
"SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", 
"epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", 
"sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", 
"sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "vae_name": [["Baked VAE", 
"FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", 
"Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", 
"flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", 
"sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "1024 x 1024"}], "empty_latent_width": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "optional_controlnet_stack": ["CONTROL_NET_STACK"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "lora_name", "lora_model_strength", "lora_clip_strength", "resolution", "empty_latent_width", "empty_latent_height", "positive", "negative", "batch_size"], "optional": ["optional_lora_stack", "optional_controlnet_stack"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, 
false, false], "output_name": ["pipe", "model", "vae"], "name": "easy hunyuanDiTLoader", "display_name": "EasyLoader (HunyuanDiT)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pixArtLoader": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", 
"cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", 
"juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", 
"sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", 
"sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "model_name": [["PixArtMS_XL_2", "PixArtMS_Sigma_XL_2", "PixArtMS_Sigma_XL_2_900M", "PixArtMS_Sigma_XL_2_2K", "PixArt_XL_2", "ControlPixArtHalf", "ControlPixArtMSHalf"]], "vae_name": [["FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", 
"hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "t5_type": [["sd3"]], "clip_name": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", 
"long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "padding": ["INT", {"default": 1, "min": 1, "max": 300}], "t5_name": [[]], "device": [["auto", "cpu", "gpu"], {"default": "cpu"}], "dtype": [["default", "auto (comfy)", "FP32", "FP16", "BF16"]], "lora_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", 
"Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", 
"flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", 
"sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "lora_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "ratio": [["custom", "0.25", "0.26", "0.27", "0.28", "0.32", "0.33", "0.35", "0.40", "0.42", "0.48", "0.50", "0.52", "0.57", "0.60", "0.68", "0.72", "0.78", "0.82", "0.88", "0.94", "1.00", "1.07", "1.13", "1.21", "1.29", "1.38", "1.46", "1.67", "1.75", "2.00", "2.09", "2.40", "2.50", "2.89", "3.00", "3.11", "3.62", "3.75", "3.88", "4.00"], {"default": "1.00"}], "empty_latent_width": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}, "optional": {"optional_lora_stack": ["LORA_STACK"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "model_name", "vae_name", "t5_type", "clip_name", "padding", "t5_name", "device", "dtype", "lora_name", "lora_model_strength", "ratio", "empty_latent_width", "empty_latent_height", "positive", "negative", "batch_size"], "optional": ["optional_lora_stack"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy pixArtLoader", "display_name": "EasyLoader (PixArt)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy mochiLoader": {"input": {"required": {"ckpt_name": 
[["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", 
"cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", 
"ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", 
"sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", 
"sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "vae_name": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", 
"sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"], {"default": "mochi_vae.safetensors"}], "positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "resolution": [["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"], {"default": "width x height (custom)"}], "empty_latent_width": ["INT", {"default": 848, "min": 64, "max": 16384, "step": 8}], "empty_latent_height": ["INT", {"default": 480, "min": 64, "max": 16384, "step": 8}], "length": ["INT", {"default": 25, "min": 7, "max": 16384, "step": 6}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}]}, "optional": {"model_override": ["MODEL"], "clip_override": ["CLIP"], "vae_override": ["VAE"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name", "vae_name", "positive", "negative", "resolution", "empty_latent_width", "empty_latent_height", "length", "batch_size"], "optional": ["model_override", 
"clip_override", "vae_override"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "VAE"], "output_is_list": [false, false, false], "output_name": ["pipe", "model", "vae"], "name": "easy mochiLoader", "display_name": "EasyLoader (Mochi)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loraSwitcher": {"input": {"required": {"toggle": ["BOOLEAN", {"label_on": "on", "label_off": "off"}], "select": ["INT", {"default": 1, "min": 1, "max": 50}], "num_loras": ["INT", {"default": 1, "min": 1, "max": 50}], "lora_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "lora_1_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", 
"chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", 
"kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", 
"sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_2_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", 
"Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", 
"gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", 
"sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_3_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", 
"LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", 
"flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", 
"sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_4_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_5_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", 
"detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", 
"kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", 
"sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_6_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", 
"UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", 
"hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", 
"sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_7_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", 
"Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", 
"flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", 
"sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], 
{"default": "None"}], "lora_8_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", 
"flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", 
"sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", 
"wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_9_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", 
"flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", 
"kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", 
"sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_10_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", 
"aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_11_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_12_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", 
"Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", 
"flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", 
"sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_13_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_14_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", 
"detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", 
"kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", 
"sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_15_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", 
"UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", 
"hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", 
"sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_16_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", 
"Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", 
"flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", 
"sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], 
{"default": "None"}], "lora_17_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", 
"flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", 
"sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", 
"uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_18_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_19_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", 
"add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball 
Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", 
"sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_20_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", 
"Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", 
"flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_21_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", 
"Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", 
"flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", 
"sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_22_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", 
"flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", 
"kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", 
"sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_23_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", 
"bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_24_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_25_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", 
"Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", 
"flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", 
"sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_26_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_27_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", 
"detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", 
"kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", 
"sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_28_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", 
"UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", 
"hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", 
"sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_29_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", 
"Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", 
"flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", 
"sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], 
{"default": "None"}], "lora_30_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", 
"flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", 
"sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", 
"uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_31_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_32_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", 
"add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball 
Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", 
"sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_33_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", 
"Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", 
"flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_34_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", 
"Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", 
"flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", 
"sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_35_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", 
"flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", 
"kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", 
"sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_36_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", 
"bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_37_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_38_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", 
"Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", 
"flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", 
"sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_39_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_40_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", 
"detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", 
"kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", 
"sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_41_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", 
"UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", 
"hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", 
"sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_42_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", 
"Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", 
"flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", 
"sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], 
{"default": "None"}], "lora_43_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", 
"flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", 
"sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", 
"uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_44_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_45_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", 
"add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball 
Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", 
"sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_46_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", 
"Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", 
"flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_47_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", 
"Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", 
"flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", 
"sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_48_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", 
"flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", 
"kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", 
"sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_49_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", 
"bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_50_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}]}}, "input_order": {"required": ["toggle", "select", "num_loras", "lora_strength"], "optional": ["optional_lora_stack", "lora_1_name", "lora_2_name", "lora_3_name", "lora_4_name", 
"lora_5_name", "lora_6_name", "lora_7_name", "lora_8_name", "lora_9_name", "lora_10_name", "lora_11_name", "lora_12_name", "lora_13_name", "lora_14_name", "lora_15_name", "lora_16_name", "lora_17_name", "lora_18_name", "lora_19_name", "lora_20_name", "lora_21_name", "lora_22_name", "lora_23_name", "lora_24_name", "lora_25_name", "lora_26_name", "lora_27_name", "lora_28_name", "lora_29_name", "lora_30_name", "lora_31_name", "lora_32_name", "lora_33_name", "lora_34_name", "lora_35_name", "lora_36_name", "lora_37_name", "lora_38_name", "lora_39_name", "lora_40_name", "lora_41_name", "lora_42_name", "lora_43_name", "lora_44_name", "lora_45_name", "lora_46_name", "lora_47_name", "lora_48_name", "lora_49_name", "lora_50_name"]}, "is_input_list": false, "output": ["LORA_STACK", "*"], "output_is_list": [false, false], "output_name": ["lora_stack", "lora_name"], "name": "easy loraSwitcher", "display_name": "EasyLoraSwitcher", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loraStack": {"input": {"required": {"toggle": ["BOOLEAN", {"label_on": "on", "label_off": "off"}], "mode": [["simple", "advanced"]], "num_loras": ["INT", {"default": 1, "min": 1, "max": 10}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "lora_1_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", 
"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", 
"fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", 
"sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_1_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_1_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], 
"lora_1_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_2_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", 
"flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", 
"ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", 
"sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_2_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_2_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_2_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_3_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", 
"chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", 
"kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", 
"sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_3_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_3_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_3_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_4_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", 
"Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", 
"flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", 
"sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], 
"lora_4_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_4_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_4_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_5_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_5_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_5_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_5_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_6_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", 
"add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", 
"kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", 
"sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_6_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_6_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_6_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_7_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", 
"OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", 
"flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", 
"sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_7_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_7_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_7_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_8_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", 
"flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", 
"kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", 
"sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_8_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_8_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_8_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_9_name": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", 
"SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", 
"gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", 
"sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_9_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_9_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_9_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_10_name": [["None", 
"AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", 
"flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", 
"sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", 
"wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"default": "None"}], "lora_10_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_10_model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_10_clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["toggle", "mode", "num_loras"], "optional": ["optional_lora_stack", "lora_1_name", "lora_1_strength", "lora_1_model_strength", "lora_1_clip_strength", "lora_2_name", "lora_2_strength", "lora_2_model_strength", "lora_2_clip_strength", "lora_3_name", "lora_3_strength", "lora_3_model_strength", "lora_3_clip_strength", "lora_4_name", "lora_4_strength", "lora_4_model_strength", "lora_4_clip_strength", "lora_5_name", "lora_5_strength", "lora_5_model_strength", "lora_5_clip_strength", "lora_6_name", "lora_6_strength", "lora_6_model_strength", "lora_6_clip_strength", "lora_7_name", "lora_7_strength", "lora_7_model_strength", "lora_7_clip_strength", "lora_8_name", "lora_8_strength", "lora_8_model_strength", "lora_8_clip_strength", "lora_9_name", "lora_9_strength", "lora_9_model_strength", "lora_9_clip_strength", "lora_10_name", "lora_10_strength", "lora_10_model_strength", "lora_10_clip_strength"]}, "is_input_list": false, "output": ["LORA_STACK"], "output_is_list": [false], "output_name": ["lora_stack"], "name": "easy loraStack", "display_name": "EasyLoraStack", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "easy controlnetStack": {"input": {"required": {"toggle": ["BOOLEAN", {"label_on": "enabled", "label_off": "disabled"}], "mode": [["simple", "advanced"]], "num_controlnet": ["INT", {"default": 1, "min": 1, "max": 3}]}, "optional": {"optional_controlnet_stack": ["CONTROL_NET_STACK"], "controlnet_1": [["None", "FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", 
"flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", 
"sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", 
"sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"], {"default": "None"}], "controlnet_1_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "start_percent_1": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent_1": ["FLOAT", {"default": 1.0, "min": 0.0, 
"max": 1.0, "step": 0.001}], "scale_soft_weight_1": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "image_1": ["IMAGE"], "controlnet_2": [["None", "FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", 
"sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", 
"sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", 
"sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"], {"default": "None"}], "controlnet_2_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "start_percent_2": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent_2": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "scale_soft_weight_2": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "image_2": ["IMAGE"], "controlnet_3": [["None", "FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", 
"FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", 
"sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", 
"sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", 
"sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"], {"default": "None"}], "controlnet_3_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "start_percent_3": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent_3": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "scale_soft_weight_3": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "image_3": ["IMAGE"]}}, "input_order": {"required": ["toggle", "mode", "num_controlnet"], "optional": ["optional_controlnet_stack", "controlnet_1", "controlnet_1_strength", "start_percent_1", "end_percent_1", "scale_soft_weight_1", "image_1", "controlnet_2", 
"controlnet_2_strength", "start_percent_2", "end_percent_2", "scale_soft_weight_2", "image_2", "controlnet_3", "controlnet_3_strength", "start_percent_3", "end_percent_3", "scale_soft_weight_3", "image_3"]}, "is_input_list": false, "output": ["CONTROL_NET_STACK"], "output_is_list": [false], "output_name": ["controlnet_stack"], "name": "easy controlnetStack", "display_name": "EasyControlnetStack", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy controlnetLoader": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", 
"control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", 
"sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", 
"sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", 
"t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}, "optional": {"control_net": ["CONTROL_NET"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "scale_soft_weights": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["pipe", "image", "control_net_name"], "optional": ["control_net", "strength", "scale_soft_weights"]}, "is_input_list": false, "output": ["PIPE_LINE", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false], "output_name": ["pipe", "positive", "negative"], "name": "easy controlnetLoader", "display_name": "EasyControlnet", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy controlnetLoaderADV": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", 
"control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", 
"sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", 
"sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", 
"t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}, "optional": {"control_net": ["CONTROL_NET"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "scale_soft_weights": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["pipe", "image", "control_net_name"], "optional": ["control_net", "strength", "start_percent", "end_percent", "scale_soft_weights"]}, "is_input_list": false, "output": ["PIPE_LINE", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false], "output_name": ["pipe", "positive", "negative"], "name": "easy controlnetLoaderADV", "display_name": "EasyControlnet (Advanced)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy controlnetLoader++": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", 
"Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", 
"sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", 
"sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", 
"sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]]}, "optional": {"control_net": ["CONTROL_NET"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "scale_soft_weights": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "union_type": [["auto", "openpose", "depth", "hed/pidi/scribble/ted", "canny/lineart/anime_lineart/mlsd", "normal", "segment", "tile", "repaint"]]}}, "input_order": {"required": ["pipe", "image", "control_net_name"], "optional": ["control_net", "strength", "start_percent", "end_percent", "scale_soft_weights", "union_type"]}, "is_input_list": false, "output": ["PIPE_LINE", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false], "output_name": ["pipe", "positive", "negative"], "name": "easy controlnetLoader++", "display_name": "EasyControlnet++", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, 
"search_aliases": []}, "easy LLLiteLoader": {"input": {"required": {"model": ["MODEL"], "model_name": [["sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors"]], "cond_image": ["IMAGE"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "steps": ["INT", {"default": 0, "min": 0, "max": 200, "step": 1}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}], "end_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}]}}, "input_order": {"required": ["model", "model_name", "cond_image", "strength", "steps", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "easy LLLiteLoader", "display_name": "EasyLLLite", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Loaders", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loraPromptApply": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "positive": ["STRING", {"default": "", "forceInput": true}]}, "optional": {"negative": ["STRING", {"default": "", "forceInput": true}]}}, "input_order": {"required": ["model", "clip", "positive"], "optional": ["negative"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "STRING", "STRING"], "output_is_list": [false, false, false, false], "output_name": ["model", "clip", "positive", "negative"], "name": "easy loraPromptApply", "display_name": "Easy Apply 
LoraPrompt", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loraStackApply": {"input": {"required": {"lora_stack": ["LORA_STACK"], "model": ["MODEL"]}, "optional": {"optional_clip": ["CLIP"]}}, "input_order": {"required": ["lora_stack", "model"], "optional": ["optional_clip"]}, "is_input_list": false, "output": ["MODEL", "CLIP"], "output_is_list": [false, false], "output_name": ["model", "clip"], "name": "easy loraStackApply", "display_name": "Easy Apply LoraStack", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy controlnetStackApply": {"input": {"required": {"controlnet_stack": ["CONTROL_NET_STACK"], "pipe": ["PIPE_LINE"]}, "optional": {}}, "input_order": {"required": ["controlnet_stack", "pipe"], "optional": []}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy controlnetStackApply", "display_name": "Easy Apply CnetStack", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApply": {"input": {"required": {"model": ["MODEL"], "image": ["IMAGE"], "preset": [["LIGHT - SD1.5 only (low strength)", "STANDARD (medium strength)", "VIT-G (medium strength)", "PLUS (high strength)", "PLUS (kolors genernal)", "REGULAR - FLUX and SD3.5 only (high strength)", "PLUS FACE (portraits)", "FULL FACE - SD1.5 only (portraits stronger)", "COMPOSITION", "FACEID", "FACEID PLUS - SD1.5 only", "FACEID PLUS KOLORS", "FACEID PLUS V2", "FACEID PORTRAIT (style transfer)", "FACEID PORTRAIT UNNORM - SDXL only (strong)"]], "lora_strength": ["FLOAT", {"default": 0.6, "min": 0, "max": 1, "step": 0.01}], "provider": [["CPU", 
"CUDA", "ROCM", "DirectML", "OpenVINO", "CoreML"], {"default": "CUDA"}], "weight": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "weight_faceidv2": ["FLOAT", {"default": 1.0, "min": -1, "max": 5.0, "step": 0.05}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "cache_mode": [["insightface only", "clip_vision only", "ipadapter only", "all", "none"], {"default": "all"}], "use_tiled": ["BOOLEAN", {"default": false}]}, "optional": {"attn_mask": ["MASK"], "optional_ipadapter": ["IPADAPTER"]}}, "input_order": {"required": ["model", "image", "preset", "lora_strength", "provider", "weight", "weight_faceidv2", "start_at", "end_at", "cache_mode", "use_tiled"], "optional": ["attn_mask", "optional_ipadapter"]}, "is_input_list": false, "output": ["MODEL", "IMAGE", "MASK", "IPADAPTER"], "output_is_list": [false, false, false, false], "output_name": ["model", "images", "masks", "ipadapter"], "name": "easy ipadapterApply", "display_name": "Easy Apply IPAdapter", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyADV": {"input": {"required": {"model": ["MODEL"], "image": ["IMAGE"], "preset": [["LIGHT - SD1.5 only (low strength)", "STANDARD (medium strength)", "VIT-G (medium strength)", "PLUS (high strength)", "PLUS (kolors genernal)", "REGULAR - FLUX and SD3.5 only (high strength)", "PLUS FACE (portraits)", "FULL FACE - SD1.5 only (portraits stronger)", "COMPOSITION", "FACEID", "FACEID PLUS - SD1.5 only", "FACEID PLUS KOLORS", "FACEID PLUS V2", "FACEID PORTRAIT (style transfer)", "FACEID PORTRAIT UNNORM - SDXL only (strong)"]], "lora_strength": ["FLOAT", {"default": 0.6, "min": 0, "max": 1, "step": 0.01}], "provider": [["CPU", "CUDA", "ROCM", "DirectML", "OpenVINO", "CoreML"], {"default": "CUDA"}], "weight": 
["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "weight_faceidv2": ["FLOAT", {"default": 1.0, "min": -1, "max": 5.0, "step": 0.05}], "weight_type": [["linear", "ease in", "ease out", "ease in-out", "reverse in-out", "weak input", "weak output", "weak middle", "strong middle", "style transfer", "composition", "strong style transfer", "style and composition", "style transfer precise"]], "combine_embeds": [["concat", "add", "subtract", "average", "norm average"]], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "embeds_scaling": [["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"]], "cache_mode": [["insightface only", "clip_vision only", "ipadapter only", "all", "none"], {"default": "all"}], "use_tiled": ["BOOLEAN", {"default": false}], "use_batch": ["BOOLEAN", {"default": false}], "sharpening": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05}]}, "optional": {"image_negative": ["IMAGE"], "attn_mask": ["MASK"], "clip_vision": ["CLIP_VISION"], "optional_ipadapter": ["IPADAPTER"], "layer_weights": ["STRING", {"default": "", "multiline": true, "placeholder": "Mad Scientist Layer Weights"}]}}, "input_order": {"required": ["model", "image", "preset", "lora_strength", "provider", "weight", "weight_faceidv2", "weight_type", "combine_embeds", "start_at", "end_at", "embeds_scaling", "cache_mode", "use_tiled", "use_batch", "sharpening"], "optional": ["image_negative", "attn_mask", "clip_vision", "optional_ipadapter", "layer_weights"]}, "is_input_list": false, "output": ["MODEL", "IMAGE", "MASK", "IPADAPTER"], "output_is_list": [false, false, false, false], "output_name": ["model", "images", "masks", "ipadapter"], "name": "easy ipadapterApplyADV", "display_name": "Easy Apply IPAdapter (Advanced)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyFaceIDKolors": {"input": {"required": {"model": ["MODEL"], "image": ["IMAGE"], "preset": [["FACEID PLUS KOLORS"], {"default": "FACEID PLUS KOLORS"}], "lora_strength": ["FLOAT", {"default": 0.6, "min": 0, "max": 1, "step": 0.01}], "provider": [["CPU", "CUDA", "ROCM", "DirectML", "OpenVINO", "CoreML"], {"default": "CUDA"}], "weight": ["FLOAT", {"default": 0.8, "min": -1, "max": 3, "step": 0.05}], "weight_faceidv2": ["FLOAT", {"default": 1.0, "min": -1, "max": 5.0, "step": 0.05}], "weight_kolors": ["FLOAT", {"default": 0.8, "min": -1, "max": 5.0, "step": 0.05}], "weight_type": [["linear", "ease in", "ease out", "ease in-out", "reverse in-out", "weak input", "weak output", "weak middle", "strong middle", "style transfer", "composition", "strong style transfer", "style and composition", "style transfer precise"]], "combine_embeds": [["concat", "add", "subtract", "average", "norm average"]], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "embeds_scaling": [["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"]], "cache_mode": [["insightface only", "clip_vision only", "ipadapter only", "all", "none"], {"default": "all"}], "use_tiled": ["BOOLEAN", {"default": false}], "use_batch": ["BOOLEAN", {"default": false}], "sharpening": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05}]}, "optional": {"image_negative": ["IMAGE"], "attn_mask": ["MASK"], "clip_vision": ["CLIP_VISION"], "optional_ipadapter": ["IPADAPTER"]}}, "input_order": {"required": ["model", "image", "preset", "lora_strength", "provider", "weight", "weight_faceidv2", "weight_kolors", "weight_type", "combine_embeds", "start_at", "end_at", "embeds_scaling", "cache_mode", "use_tiled", "use_batch", "sharpening"], "optional": ["image_negative", "attn_mask", "clip_vision", "optional_ipadapter"]}, "is_input_list": 
false, "output": ["MODEL", "IMAGE", "MASK", "IPADAPTER"], "output_is_list": [false, false, false, false], "output_name": ["model", "images", "masks", "ipadapter"], "name": "easy ipadapterApplyFaceIDKolors", "display_name": "Easy Apply IPAdapter (FaceID Kolors)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyEncoder": {"input": {"required": {"model": ["MODEL"], "clip_vision": ["CLIP_VISION"], "image1": ["IMAGE"], "preset": [["LIGHT - SD1.5 only (low strength)", "STANDARD (medium strength)", "VIT-G (medium strength)", "PLUS (high strength)", "PLUS (kolors genernal)", "REGULAR - FLUX and SD3.5 only (high strength)", "PLUS FACE (portraits)", "FULL FACE - SD1.5 only (portraits stronger)", "COMPOSITION"]], "num_embeds": ["INT", {"default": 2, "min": 1, "max": 4}]}, "optional": {"image2": ["IMAGE"], "image3": ["IMAGE"], "image4": ["IMAGE"], "mask1": ["MASK"], "weight1": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "mask2": ["MASK"], "weight2": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "mask3": ["MASK"], "weight3": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "mask4": ["MASK"], "weight4": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "combine_method": [["concat", "add", "subtract", "average", "norm average", "max", "min"]], "optional_ipadapter": ["IPADAPTER"], "pos_embeds": ["EMBEDS"], "neg_embeds": ["EMBEDS"]}}, "input_order": {"required": ["model", "clip_vision", "image1", "preset", "num_embeds"], "optional": ["image2", "image3", "image4", "mask1", "weight1", "mask2", "weight2", "mask3", "weight3", "mask4", "weight4", "combine_method", "optional_ipadapter", "pos_embeds", "neg_embeds"]}, "is_input_list": false, "output": ["MODEL", "CLIP_VISION", "IPADAPTER", "EMBEDS", "EMBEDS"], "output_is_list": [false, false, false, false, false], 
"output_name": ["model", "clip_vision", "ipadapter", "pos_embed", "neg_embed"], "name": "easy ipadapterApplyEncoder", "display_name": "Easy Apply IPAdapter (Encoder)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyEmbeds": {"input": {"required": {"model": ["MODEL"], "clip_vision": ["CLIP_VISION"], "ipadapter": ["IPADAPTER"], "pos_embed": ["EMBEDS"], "weight": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "weight_type": [["linear", "ease in", "ease out", "ease in-out", "reverse in-out", "weak input", "weak output", "weak middle", "strong middle", "style transfer", "composition", "strong style transfer", "style and composition", "style transfer precise"]], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "embeds_scaling": [["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"]]}, "optional": {"neg_embed": ["EMBEDS"], "attn_mask": ["MASK"]}}, "input_order": {"required": ["model", "clip_vision", "ipadapter", "pos_embed", "weight", "weight_type", "start_at", "end_at", "embeds_scaling"], "optional": ["neg_embed", "attn_mask"]}, "is_input_list": false, "output": ["MODEL", "IPADAPTER"], "output_is_list": [false, false], "output_name": ["model", "ipadapter"], "name": "easy ipadapterApplyEmbeds", "display_name": "Easy Apply IPAdapter (Embeds)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyRegional": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "positive": ["STRING", {"default": "", "placeholder": "positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "negative", "multiline": true}], 
"image_weight": ["FLOAT", {"default": 1.0, "min": -1.0, "max": 3.0, "step": 0.05}], "prompt_weight": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.05}], "weight_type": [["linear", "ease in", "ease out", "ease in-out", "reverse in-out", "weak input", "weak output", "weak middle", "strong middle", "style transfer", "composition", "strong style transfer", "style and composition", "style transfer precise"]], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"mask": ["MASK"], "optional_ipadapter_params": ["IPADAPTER_PARAMS"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "image", "positive", "negative", "image_weight", "prompt_weight", "weight_type", "start_at", "end_at"], "optional": ["mask", "optional_ipadapter_params"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "IPADAPTER_PARAMS", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false], "output_name": ["pipe", "ipadapter_params", "positive", "negative"], "name": "easy ipadapterApplyRegional", "display_name": "Easy Apply IPAdapter (Regional)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterApplyFromParams": {"input": {"required": {"model": ["MODEL"], "preset": [["LIGHT - SD1.5 only (low strength)", "STANDARD (medium strength)", "VIT-G (medium strength)", "PLUS (high strength)", "PLUS (kolors genernal)", "REGULAR - FLUX and SD3.5 only (high strength)", "PLUS FACE (portraits)", "FULL FACE - SD1.5 only (portraits stronger)", "COMPOSITION"]], "ipadapter_params": ["IPADAPTER_PARAMS"], "combine_embeds": [["concat", "add", "subtract", "average", "norm average", "max", "min"]], "embeds_scaling": [["V only", "K+V", "K+V w/ C 
penalty", "K+mean(V) w/ C penalty"]], "cache_mode": [["insightface only", "clip_vision only", "ipadapter only", "all", "none"], {"default": "insightface only"}]}, "optional": {"optional_ipadapter": ["IPADAPTER"], "image_negative": ["IMAGE"]}}, "input_order": {"required": ["model", "preset", "ipadapter_params", "combine_embeds", "embeds_scaling", "cache_mode"], "optional": ["optional_ipadapter", "image_negative"]}, "is_input_list": false, "output": ["MODEL", "IPADAPTER"], "output_is_list": [false, false], "output_name": ["model", "ipadapter"], "name": "easy ipadapterApplyFromParams", "display_name": "Easy Apply IPAdapter (From Params)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ipadapterStyleComposition": {"input": {"required": {"model": ["MODEL"], "image_style": ["IMAGE"], "preset": [["LIGHT - SD1.5 only (low strength)", "STANDARD (medium strength)", "VIT-G (medium strength)", "PLUS (high strength)", "PLUS (kolors genernal)", "REGULAR - FLUX and SD3.5 only (high strength)", "PLUS FACE (portraits)", "FULL FACE - SD1.5 only (portraits stronger)", "COMPOSITION"]], "weight_style": ["FLOAT", {"default": 1.0, "min": -1, "max": 5, "step": 0.05}], "weight_composition": ["FLOAT", {"default": 1.0, "min": -1, "max": 5, "step": 0.05}], "expand_style": ["BOOLEAN", {"default": false}], "combine_embeds": [["concat", "add", "subtract", "average", "norm average"], {"default": "average"}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "embeds_scaling": [["V only", "K+V", "K+V w/ C penalty", "K+mean(V) w/ C penalty"]], "cache_mode": [["insightface only", "clip_vision only", "ipadapter only", "all", "none"], {"default": "all"}]}, "optional": {"image_composition": ["IMAGE"], "image_negative": ["IMAGE"], "attn_mask": ["MASK"], 
"clip_vision": ["CLIP_VISION"], "optional_ipadapter": ["IPADAPTER"]}}, "input_order": {"required": ["model", "image_style", "preset", "weight_style", "weight_composition", "expand_style", "combine_embeds", "start_at", "end_at", "embeds_scaling", "cache_mode"], "optional": ["image_composition", "image_negative", "attn_mask", "clip_vision", "optional_ipadapter"]}, "is_input_list": false, "output": ["MODEL", "IPADAPTER"], "output_is_list": [false, false], "output_name": ["model", "ipadapter"], "name": "easy ipadapterStyleComposition", "display_name": "Easy Apply IPAdapter (StyleComposition)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy instantIDApply": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "instantid_file": [["ip-adapter.bin"]], "insightface": [["CPU", "CUDA", "ROCM"]], "control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", 
"control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", 
"sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", 
"sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", 
"t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]], "cn_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "cn_soft_weights": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "weight": ["FLOAT", {"default": 0.8, "min": 0.0, "max": 5.0, "step": 0.01}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "noise": ["FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.05}]}, "optional": {"image_kps": ["IMAGE"], "mask": ["MASK"], "control_net": ["CONTROL_NET"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "image", "instantid_file", "insightface", "control_net_name", "cn_strength", "cn_soft_weights", "weight", "start_at", "end_at", "noise"], "optional": ["image_kps", "mask", "control_net"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false], "output_name": ["pipe", "model", "positive", "negative"], "name": "easy instantIDApply", "display_name": "Easy Apply InstantID", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy instantIDApplyADV": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "instantid_file": [["ip-adapter.bin"]], "insightface": [["CPU", "CUDA", "ROCM"]], "control_net_name": 
[["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", "flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", 
"sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", "sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", 
"sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/kohya_controllllite_xl_blur.safetensors", "sdxl/kohya_controllllite_xl_blur_anime.safetensors", "sdxl/kohya_controllllite_xl_blur_anime_beta.safetensors", "sdxl/kohya_controllllite_xl_canny.safetensors", "sdxl/kohya_controllllite_xl_canny_anime.safetensors", "sdxl/kohya_controllllite_xl_depth.safetensors", "sdxl/kohya_controllllite_xl_depth_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime.safetensors", "sdxl/kohya_controllllite_xl_openpose_anime_v2.safetensors", "sdxl/kohya_controllllite_xl_scribble_anime.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", 
"sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", "sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]], "cn_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "cn_soft_weights": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "weight": ["FLOAT", {"default": 0.8, "min": 0.0, "max": 5.0, "step": 0.01}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "noise": ["FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.05}]}, "optional": {"image_kps": ["IMAGE"], "mask": ["MASK"], "control_net": ["CONTROL_NET"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"]}, "hidden": {"prompt": 
"PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "image", "instantid_file", "insightface", "control_net_name", "cn_strength", "cn_soft_weights", "weight", "start_at", "end_at", "noise"], "optional": ["image_kps", "mask", "control_net", "positive", "negative"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false], "output_name": ["pipe", "model", "positive", "negative"], "name": "easy instantIDApplyADV", "display_name": "Easy Apply InstantID (Advanced)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pulIDApply": {"input": {"required": {"model": ["MODEL"], "pulid_file": [["flux/pulid_flux_v0.9.0.safetensors", "ip-adapter_pulid_sdxl_fp16.safetensors", "pulid_flux_v0.9.1.safetensors", "pulid_v1.1.safetensors"]], "insightface": [["CPU", "CUDA", "ROCM"]], "image": ["IMAGE"], "method": [["fidelity", "style", "neutral"]], "weight": ["FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.05}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"attn_mask": ["MASK"]}}, "input_order": {"required": ["model", "pulid_file", "insightface", "image", "method", "weight", "start_at", "end_at"], "optional": ["attn_mask"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "easy pulIDApply", "display_name": "Easy Apply PuLID", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pulIDApplyADV": {"input": {"required": {"model": ["MODEL"], 
"pulid_file": [["flux/pulid_flux_v0.9.0.safetensors", "ip-adapter_pulid_sdxl_fp16.safetensors", "pulid_flux_v0.9.1.safetensors", "pulid_v1.1.safetensors"]], "insightface": [["CPU", "CUDA", "ROCM"]], "image": ["IMAGE"], "weight": ["FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.05}], "projection": [["ortho_v2", "ortho", "none"], {"default": "ortho_v2"}], "fidelity": ["INT", {"default": 8, "min": 0, "max": 32, "step": 1}], "noise": ["FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.1}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"attn_mask": ["MASK"]}}, "input_order": {"required": ["model", "pulid_file", "insightface", "image", "weight", "projection", "fidelity", "noise", "start_at", "end_at"], "optional": ["attn_mask"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "easy pulIDApplyADV", "display_name": "Easy Apply PuLID (Advanced)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy styleAlignedBatchAlign": {"input": {"required": {"model": ["MODEL"], "share_norm": [["both", "group", "layer", "disabled"]], "share_attn": [["q+k", "q+k+v", "disabled"]], "scale": ["FLOAT", {"default": 1, "min": 0, "max": 1.0, "step": 0.1}]}}, "input_order": {"required": ["model", "share_norm", "share_attn", "scale"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "easy styleAlignedBatchAlign", "display_name": "Easy Apply StyleAlign", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy icLightApply": {"input": {"required": {"mode": [["Foreground", 
"Foreground&Background"]], "model": ["MODEL"], "image": ["IMAGE"], "vae": ["VAE"], "lighting": [["None", "Left Light", "Right Light", "Top Light", "Bottom Light", "Circle Light"], {"default": "None"}], "source": [["Use Background Image", "Use Flipped Background Image", "Left Light", "Right Light", "Top Light", "Bottom Light", "Ambient"], {"default": "Use Background Image"}], "remove_bg": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["mode", "model", "image", "vae", "lighting", "source", "remove_bg"]}, "is_input_list": false, "output": ["MODEL", "IMAGE"], "output_is_list": [false, false], "output_name": ["model", "lighting_image"], "name": "easy icLightApply", "display_name": "Easy Apply ICLight", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Adapter", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy applyFooocusInpaint": {"input": {"required": {"model": ["MODEL"], "latent": ["LATENT"], "head": [["fooocus_inpaint_head"]], "patch": [["inpaint_v26 (1.32GB)", "inpaint_v25 (2.58GB)", "inpaint (1.32GB)"]]}}, "input_order": {"required": ["model", "latent", "head", "patch"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "easy applyFooocusInpaint", "display_name": "Easy Apply Fooocus Inpaint", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy applyBrushNet": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "mask": ["MASK"], "brushnet": [[]], "dtype": [["float16", "bfloat16", "float32", "float64"]], "scale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}], "start_at": ["INT", {"default": 0, "min": 0, "max": 10000}], "end_at": ["INT", {"default": 10000, "min": 0, "max": 10000}]}}, "input_order": {"required": ["pipe", "image", "mask", "brushnet", "dtype", "scale", 
"start_at", "end_at"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy applyBrushNet", "display_name": "Easy Apply BrushNet", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy applyPowerPaint": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "mask": ["MASK"], "powerpaint_model": [[]], "powerpaint_clip": [[]], "dtype": [["float16", "bfloat16", "float32", "float64"]], "fitting": ["FLOAT", {"default": 1.0, "min": 0.3, "max": 1.0}], "function": [["text guided", "shape guided", "object removal", "context aware", "image outpainting"]], "scale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}], "start_at": ["INT", {"default": 0, "min": 0, "max": 10000}], "end_at": ["INT", {"default": 10000, "min": 0, "max": 10000}], "save_memory": [["none", "auto", "max"]]}}, "input_order": {"required": ["pipe", "image", "mask", "powerpaint_model", "powerpaint_clip", "dtype", "fitting", "function", "scale", "start_at", "end_at", "save_memory"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy applyPowerPaint", "display_name": "Easy Apply PowerPaint", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy applyInpaint": {"input": {"required": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "mask": ["MASK"], "inpaint_mode": [["normal", "fooocus_inpaint", "brushnet_random", "brushnet_segmentation", "powerpaint"]], "encode": [["none", "vae_encode_inpaint", "inpaint_model_conditioning", "different_diffusion"], {"default": "none"}], "grow_mask_by": ["INT", {"default": 6, "min": 0, "max": 64, "step": 1}], "dtype": [["float16", "bfloat16", "float32", "float64"]], "fitting": ["FLOAT", 
{"default": 1.0, "min": 0.3, "max": 1.0}], "function": [["text guided", "shape guided", "object removal", "context aware", "image outpainting"]], "scale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}], "start_at": ["INT", {"default": 0, "min": 0, "max": 10000}], "end_at": ["INT", {"default": 10000, "min": 0, "max": 10000}]}, "optional": {"noise_mask": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["pipe", "image", "mask", "inpaint_mode", "encode", "grow_mask_by", "dtype", "fitting", "function", "scale", "start_at", "end_at"], "optional": ["noise_mask"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy applyInpaint", "display_name": "Easy Apply Inpaint", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Inpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSampling": {"input": {"required": {"pipe": ["PIPE_LINE"], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", 
"align_your_steps", "gits"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image_to_latent": ["IMAGE"], "latent": ["LATENT"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "steps", "cfg", "sampler_name", "scheduler", "denoise", "seed"], "optional": ["image_to_latent", "latent"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSampling", "display_name": "PreSampling", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingAdvanced": {"input": {"required": {"pipe": ["PIPE_LINE"], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps", "gits"]], "start_at_step": ["INT", 
{"default": 0, "min": 0, "max": 10000}], "end_at_step": ["INT", {"default": 10000, "min": 0, "max": 10000}], "add_noise": [["enable (CPU)", "enable (GPU=A1111)", "disable"], {"default": "enable (CPU)"}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}], "return_with_leftover_noise": [["disable", "enable"]]}, "optional": {"image_to_latent": ["IMAGE"], "latent": ["LATENT"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "steps", "cfg", "sampler_name", "scheduler", "start_at_step", "end_at_step", "add_noise", "seed", "return_with_leftover_noise"], "optional": ["image_to_latent", "latent"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingAdvanced", "display_name": "PreSampling (Advanced)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingNoiseIn": {"input": {"required": {"pipe": ["PIPE_LINE"], "factor": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", 
"gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps", "gits"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"optional_noise_seed": ["INT", {"forceInput": true}], "optional_latent": ["LATENT"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "factor", "steps", "cfg", "sampler_name", "scheduler", "denoise", "seed"], "optional": ["optional_noise_seed", "optional_latent"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingNoiseIn", "display_name": "PreSampling (NoiseIn)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingCustom": {"input": {"required": {"pipe": ["PIPE_LINE"], "guider": [["CFG", "DualCFG", "Basic", "IP2P+CFG", "IP2P+DualCFG", "IP2P+Basic"], {"default": "Basic"}], "cfg": ["FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0}], "cfg_negative": ["FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", 
"lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2", "inversed_euler"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "karrasADV", "exponentialADV", "polyExponential", "sdturbo", "vp", "alignYourSteps", "gits"]], "coeff": ["FLOAT", {"default": 1.2, "min": 0.8, "max": 1.5, "step": 0.05}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "sigma_max": ["FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step": 0.01, "round": false}], "sigma_min": ["FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step": 0.01, "round": false}], "rho": ["FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": false}], "beta_d": ["FLOAT", {"default": 19.9, "min": 0.0, "max": 1000.0, "step": 0.01, "round": false}], "beta_min": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 1000.0, "step": 0.01, "round": false}], "eps_s": ["FLOAT", {"default": 0.001, "min": 0.0, "max": 1.0, "step": 0.0001, "round": false}], "flip_sigmas": ["BOOLEAN", {"default": false}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "add_noise": [["enable (CPU)", "enable (GPU=A1111)", "disable"], {"default": "enable (CPU)"}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image_to_latent": ["IMAGE"], "latent": ["LATENT"], "optional_sampler": ["SAMPLER"], "optional_sigmas": ["SIGMAS"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "guider", "cfg", "cfg_negative", "sampler_name", "scheduler", "coeff", "steps", "sigma_max", "sigma_min", "rho", "beta_d", "beta_min", "eps_s", "flip_sigmas", "denoise", "add_noise", 
"seed"], "optional": ["image_to_latent", "latent", "optional_sampler", "optional_sigmas"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingCustom", "display_name": "PreSampling (Custom)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingSdTurbo": {"input": {"required": {"pipe": ["PIPE_LINE"], "steps": ["INT", {"default": 1, "min": 1, "max": 10}], "cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "eta": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}], "s_noise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}], "upscale_ratio": ["FLOAT", {"default": 2.0, "min": 0.0, "max": 16.0, "step": 0.01, "round": false}], "start_step": ["INT", {"default": 5, "min": 0, "max": 1000, "step": 1}], "end_step": ["INT", {"default": 15, "min": 0, "max": 1000, "step": 1}], "upscale_n_step": ["INT", {"default": 3, "min": 0, "max": 1000, "step": 1}], "unsharp_kernel_size": ["INT", {"default": 3, "min": 1, 
"max": 21, "step": 1}], "unsharp_sigma": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}], "unsharp_strength": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01, "round": false}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "steps", "cfg", "sampler_name", "eta", "s_noise", "upscale_ratio", "start_step", "end_step", "upscale_n_step", "unsharp_kernel_size", "unsharp_sigma", "unsharp_strength", "seed"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingSdTurbo", "display_name": "PreSampling (SDTurbo)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingDynamicCFG": {"input": {"required": {"pipe": ["PIPE_LINE"], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "cfg_mode": [["Constant", "Linear Down", "Cosine Down", "Half Cosine Down", "Linear Up", "Cosine Up", "Half Cosine Up", "Power Up", "Power Down", "Linear Repeating", "Cosine Repeating", "Sawtooth"]], "cfg_scale_min": ["FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.5}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", 
"res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps", "gits"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image_to_latent": ["IMAGE"], "latent": ["LATENT"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "steps", "cfg", "cfg_mode", "cfg_scale_min", "sampler_name", "scheduler", "denoise", "seed"], "optional": ["image_to_latent", "latent"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingDynamicCFG", "display_name": "PreSampling (DynamicCFG)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingCascade": {"input": {"required": {"pipe": ["PIPE_LINE"], "encode_vae_name": [["None", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", 
"flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "decode_vae_name": [["None", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", 
"hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 4.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", 
"res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"default": "euler_ancestral"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"default": "simple"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image_to_latent_c": ["IMAGE"], "latent_c": ["LATENT"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "encode_vae_name", "decode_vae_name", "steps", "cfg", "sampler_name", "scheduler", "denoise", "seed"], "optional": ["image_to_latent_c", "latent_c"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingCascade", "display_name": "PreSampling (Cascade)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingLayerDiffusion": {"input": {"required": {"pipe": ["PIPE_LINE"], "method": [["Attention Injection", "Conv Injection", "Everything", "Foreground", "Background"]], "weight": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", 
"dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"default": "euler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps", "gits"], {"default": "normal"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image": ["IMAGE"], "blended_image": ["IMAGE"], "mask": ["MASK"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "method", "weight", "steps", "cfg", "sampler_name", "scheduler", "denoise", "seed"], "optional": ["image", "blended_image", "mask"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingLayerDiffusion", "display_name": "PreSampling (LayerDiffuse)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preSamplingLayerDiffusionADDTL": {"input": {"required": {"pipe": ["PIPE_LINE"], "foreground_prompt": ["STRING", {"default": "", "placeholder": "Foreground Additional Prompt", "multiline": true}], "background_prompt": ["STRING", {"default": "", "placeholder": "Background Additional Prompt", "multiline": true}], "blended_prompt": ["STRING", {"default": "", "placeholder": "Blended Additional Prompt", "multiline": true}]}, "optional": {"optional_fg_cond": 
["CONDITIONING"], "optional_bg_cond": ["CONDITIONING"], "optional_blended_cond": ["CONDITIONING"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "foreground_prompt", "background_prompt", "blended_prompt"], "optional": ["optional_fg_cond", "optional_bg_cond", "optional_blended_cond"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preSamplingLayerDiffusionADDTL", "display_name": "PreSampling (LayerDiffuse ADDTL)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "dynamicThresholdingFull": {"input": {"required": {"model": ["MODEL"], "mimic_scale": ["FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step": 0.5}], "threshold_percentile": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "mimic_mode": [["Constant", "Linear Down", "Cosine Down", "Half Cosine Down", "Linear Up", "Cosine Up", "Half Cosine Up", "Power Up", "Power Down", "Linear Repeating", "Cosine Repeating", "Sawtooth"]], "mimic_scale_min": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.5}], "cfg_mode": [["Constant", "Linear Down", "Cosine Down", "Half Cosine Down", "Linear Up", "Cosine Up", "Half Cosine Up", "Power Up", "Power Down", "Linear Repeating", "Cosine Repeating", "Sawtooth"]], "cfg_scale_min": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.5}], "sched_val": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}], "separate_feature_channels": [["enable", "disable"]], "scaling_startpoint": [["MEAN", "ZERO"]], "variability_measure": [["AD", "STD"]], "interpolate_phi": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "mimic_scale", 
"threshold_percentile", "mimic_mode", "mimic_scale_min", "cfg_mode", "cfg_scale_min", "sched_val", "separate_feature_channels", "scaling_startpoint", "variability_measure", "interpolate_phi"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "dynamicThresholdingFull", "display_name": "DynamicThresholdingFull", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/PreSampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy fullkSampler": {"input": {"required": {"pipe": ["PIPE_LINE"], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps", "gits"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save", "None"]], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"seed": ["INT", {"default": 0, "min": 0, 
"max": 1125899906842624}], "model": ["MODEL"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent": ["LATENT"], "vae": ["VAE"], "clip": ["CLIP"], "xyPlot": ["XYPLOT"], "image": ["IMAGE"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "steps", "cfg", "sampler_name", "scheduler", "denoise", "image_output", "link_id", "save_prefix"], "optional": ["seed", "model", "positive", "negative", "latent", "vae", "clip", "xyPlot", "image"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["pipe", "image", "model", "positive", "negative", "latent", "vae", "clip", "seed"], "name": "easy fullkSampler", "display_name": "EasyKSampler (Full)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSampler": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save", "None"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 
9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE"], "output_is_list": [false, false], "output_name": ["pipe", "image"], "name": "easy kSampler", "display_name": "EasyKSampler", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerCustom": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save", "None"], {"default": "None"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", 
"GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "LATENT", "LATENT", "IMAGE"], "output_is_list": [false, false, false, false], "output_name": ["pipe", "output", "denoised_output", "image"], "name": "easy kSamplerCustom", "display_name": "EasyKSampler (Custom)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerTiled": {"input": {"required": {"pipe": ["PIPE_LINE"], "tile_size": ["INT", {"default": 512, "min": 320, "max": 4096, "step": 64}], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save", "None"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", 
"prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "tile_size", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE"], "output_is_list": [false, false], "output_name": ["pipe", "image"], "name": "easy kSamplerTiled", "display_name": "EasyKSampler (Tiled Decode)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerLayerDiffusion": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE", "IMAGE", "MASK"], "output_is_list": [false, false, false, true], 
"output_name": ["pipe", "final_image", "original_image", "alpha"], "name": "easy kSamplerLayerDiffusion", "display_name": "EasyKSampler (LayerDiffuse)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerInpainting": {"input": {"required": {"pipe": ["PIPE_LINE"], "grow_mask_by": ["INT", {"default": 6, "min": 0, "max": 64, "step": 1}], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}], "additional": [["None", "InpaintModelCond", "Differential Diffusion", "Fooocus Inpaint", "Fooocus Inpaint + DD", "Brushnet Random", "Brushnet Random + DD", "Brushnet Segmentation", "Brushnet Segmentation + DD"], {"default": "None"}]}, "optional": {"model": ["MODEL"], "mask": ["MASK"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "grow_mask_by", "image_output", "link_id", "save_prefix", "additional"], "optional": ["model", "mask"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE", "VAE"], "output_is_list": 
[false, false, false], "output_name": ["pipe", "image", "vae"], "name": "easy kSamplerInpainting", "display_name": "EasyKSampler (Inpainting)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerDownscaleUnet": {"input": {"required": {"pipe": ["PIPE_LINE"], "downscale_mode": [["None", "Auto", "Custom"], {"default": "Auto"}], "block_number": ["INT", {"default": 3, "min": 1, "max": 32, "step": 1}], "downscale_factor": ["FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}], "downscale_after_skip": ["BOOLEAN", {"default": true}], "downscale_method": [["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]], "upscale_method": [["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": 
["pipe", "downscale_mode", "block_number", "downscale_factor", "start_percent", "end_percent", "downscale_after_skip", "downscale_method", "upscale_method", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE"], "output_is_list": [false, false], "output_name": ["pipe", "image"], "name": "easy kSamplerDownscaleUnet", "display_name": "EasyKsampler (Downscale Unet)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy kSamplerSDTurbo": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE"], "output_is_list": [false, 
false], "output_name": ["pipe", "image"], "name": "easy kSamplerSDTurbo", "display_name": "EasyKSampler (SDTurbo)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy fullCascadeKSampler": {"input": {"required": {"pipe": ["PIPE_LINE"], "encode_vae_name": [["None", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", 
"vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "decode_vae_name": [["None", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", 
"vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 4.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"default": "euler_ancestral"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"default": "simple"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"]], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}], "seed": ["INT", {"default": 0, "min": 0, "max": 1125899906842624}]}, "optional": {"image_to_latent_c": ["IMAGE"], "latent_c": ["LATENT"], "model_c": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", 
"ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", "badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "encode_vae_name", "decode_vae_name", "steps", "cfg", "sampler_name", "scheduler", "denoise", "image_output", "link_id", "save_prefix", "seed"], "optional": ["image_to_latent_c", "latent_c", "model_c"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "LATENT"], "output_is_list": [false, false, false], "output_name": ["pipe", "model_b", "latent_b"], "name": "easy fullCascadeKSampler", "display_name": "EasyCascadeKsampler (Full)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy cascadeKSampler": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model_c": ["MODEL"]}, "hidden": {"tile_size": "INT", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", "embeddingsList": [["AS-YoungV2-neg.pt", "AS-YoungV2.pt", "BadDream.pt", "By bad artist -neg.pt", "ERA09NEGV2.pt", "EasyNegativeV2.safetensors", "FastNegativeV2.pt", "GS-DeFeminize-neg.pt", "GS-DeMasculate-neg.pt", "GS-Girlish.pt", "Style-GravityMagic.pt", "bad-hands-5.pt", "bad-picture-chill-75v.pt", "badhandv4.pt", 
"badpic.pt", "easynegative.safetensors", "epiCNegative.pt", "negative_hand-neg.pt", "ng_deepnegative_v1_75t.pt", "nobg.pt", "prettify.pt", "prettyeyes.pt", "style-rustmagic-neg.pt", "style-rustmagic.pt", "verybadimagenegative_v1.3.pt"]]}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model_c"], "hidden": ["tile_size", "prompt", "extra_pnginfo", "my_unique_id", "embeddingsList"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE"], "output_is_list": [false, false], "output_name": ["pipe", "image"], "name": "easy cascadeKSampler", "display_name": "EasyCascadeKsampler", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy unSampler": {"input": {"required": {"steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "end_at_step": ["INT", {"default": 0, "min": 0, "max": 10000}], "cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "normalize": [["disable", "enable"]]}, "optional": {"pipe": ["PIPE_LINE"], "optional_model": 
["MODEL"], "optional_positive": ["CONDITIONING"], "optional_negative": ["CONDITIONING"], "optional_latent": ["LATENT"]}}, "input_order": {"required": ["steps", "end_at_step", "cfg", "sampler_name", "scheduler", "normalize"], "optional": ["pipe", "optional_model", "optional_positive", "optional_negative", "optional_latent"]}, "is_input_list": false, "output": ["PIPE_LINE", "LATENT"], "output_is_list": [false, false], "output_name": ["pipe", "latent"], "name": "easy unSampler", "display_name": "EasyUnSampler", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Sampler", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy hiresFix": {"input": {"required": {"model_name": [["1x-ITF-SkinDiffDetail-Lite-v1.pth", "1xDeJPG_OmniSR.pth", "4x-AnimeSharp.pth", "4x-ClearRealityV1.pth", "4x-ClearRealityV1_Soft.pth", "4x-UltraSharp.pth", "4xFaceUpDAT.pth", "4xLSDIR.pth", "4xNMKDSuperscale_4xNMKDSuperscale.pt", "4xNomos8kHAT-L_otf.pth", "4xNomosUniDAT_otf.pth", "4xRealWebPhoto_v4_dat2.pth", "4xRealWebPhoto_v4_dat2.safetensors", "4xUltrasharp_4xUltrasharpV10.pt", "4x_NMKD-Siax_200k.pth", "4x_NMKD-Superscale-SP_178000_G.pth", "4x_NickelbackFS_72000_G.pth", "4x_foolhardy_Remacri.pth", "8x_NMKD-Faces_160000_G.pth", "8x_NMKD-Superscale_150000_G.pth", "ESRGAN_4x.pth", "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth", "RealESRGAN_x2.pth", "RealESRGAN_x2plus.pth", "RealESRGAN_x4.pth", "RealESRGAN_x4plus.pth", "RealESRGAN_x4plus_anime_6B.pth", "RealESRNet_x4plus.pth", "ldsr/last.ckpt", "ltxv-spatial-upscaler-0.9.7.safetensors", "nmkdSiaxCX_200k.pt", "realesr-animevideov3.pth", "realesr-general-x4v3.pth", "realesrGeneralWDNX4_v3.pt", "x1_ITF_SkinDiffDetail_Lite_v1.pth"]], "rescale_after_model": [[false, true], {"default": true}], "rescale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"]], "rescale": [["by percentage", "to Width/Height", "to longer side - maintain aspect"]], "percent": ["INT", 
{"default": 50, "min": 0, "max": 1000, "step": 1}], "width": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "height": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "longer_side": ["INT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "crop": [["disabled", "center"]], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"pipe": ["PIPE_LINE"], "image": ["IMAGE"], "vae": ["VAE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["model_name", "rescale_after_model", "rescale_method", "rescale", "percent", "width", "height", "longer_side", "crop", "image_output", "link_id", "save_prefix"], "optional": ["pipe", "image", "vae"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE", "LATENT"], "output_is_list": [false, false, false], "output_name": ["pipe", "image", "latent"], "name": "easy hiresFix", "display_name": "HiresFix", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Fix", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy preDetailerFix": {"input": {"required": {"pipe": ["PIPE_LINE"], "guide_size": ["FLOAT", {"default": 256, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 768, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", 
"heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "align_your_steps"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"bbox_segm_pipe": ["PIPE_LINE"], "sam_pipe": ["PIPE_LINE"], "optional_image": ["IMAGE"]}}, "input_order": {"required": ["pipe", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "noise_mask", "force_inpaint", "drop_size", "wildcard", "cycle"], "optional": ["bbox_segm_pipe", "sam_pipe", "optional_image"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preDetailerFix", "display_name": "PreDetailerFix", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": 
"EasyUse/Fix", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy preMaskDetailerFix": {"input": {"required": {"pipe": ["PIPE_LINE"], "mask": ["MASK"], "guide_size": ["FLOAT", {"default": 384, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "mask_mode": ["BOOLEAN", {"default": true, "label_on": "masked only", "label_off": "whole"}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "batch_size": ["INT", 
{"default": 1, "min": 1, "max": 100}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"optional_image": ["IMAGE"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}]}}, "input_order": {"required": ["pipe", "mask", "guide_size", "guide_size_for", "max_size", "mask_mode", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "crop_factor", "drop_size", "refiner_ratio", "batch_size", "cycle"], "optional": ["optional_image", "inpaint_model", "noise_mask_feather"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy preMaskDetailerFix", "display_name": "preMaskDetailerFix", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Fix", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy ultralyticsDetectorPipe": {"input": {"required": {"model_name": [["bbox/Eyeful_v2-Paired.pt", "bbox/Eyes.pt", "bbox/face_yolov8m.pt", "bbox/face_yolov8n.pt", "bbox/face_yolov8n_v2.pt", "bbox/face_yolov8s.pt", "bbox/hand_yolov8n.pt", "bbox/hand_yolov8s.pt", "bbox/lips_v1.pt", "bbox/yolov8s.pt", "segm/deepfashion2_yolov8s-seg.pt", "segm/face_yolov8m-seg_60.pt", "segm/face_yolov8n-seg2_60.pt", "segm/facial_features_yolo8x-seg.pt", "segm/flowers_seg_yolov8model.pt", "segm/hair_yolov8n-seg_60.pt", "segm/person_yolov8m-seg.pt", "segm/person_yolov8n-seg.pt", "segm/person_yolov8s-seg.pt", "segm/skin_yolov8m-seg_400.pt", "segm/skin_yolov8n-seg_400.pt", "segm/skin_yolov8n-seg_800.pt", "segm/yolov8_butterfly_custom.pt", "segm/yolov8l-seg.pt", "segm/yolov8m-seg.pt", "segm/yolov8n-seg.pt", "segm/yolov8s-seg.pt", "segm/yolov8x-seg.pt"]], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 
1}], "bbox_crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}]}}, "input_order": {"required": ["model_name", "bbox_threshold", "bbox_dilation", "bbox_crop_factor"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["bbox_segm_pipe"], "name": "easy ultralyticsDetectorPipe", "display_name": "UltralyticsDetector (Pipe)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Fix", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy samLoaderPipe": {"input": {"required": {"model_name": [["mobile_sam.pt", "sam2_hiera_base_plus.pt", "sam2_hiera_large.pt", "sam2_hiera_small.pt", "sam2_hiera_tiny.pt", "sam_hq_vit_b.pth", "sam_hq_vit_h.pth", "sam_hq_vit_l.pth", "sam_vit_b_01ec64.pth", "sam_vit_h_4b8939.pth", "sam_vit_l_0b3195.pth"]], "device_mode": [["AUTO", "Prefer GPU", "CPU"], {"default": "AUTO"}], "sam_detection_hint": [["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"]], "sam_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sam_threshold": ["FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}], "sam_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}], "sam_mask_hint_use_negative": [["False", "Small", "Outter"]]}}, "input_order": {"required": ["model_name", "device_mode", "sam_detection_hint", "sam_dilation", "sam_threshold", "sam_bbox_expansion", "sam_mask_hint_threshold", "sam_mask_hint_use_negative"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["sam_pipe"], "name": "easy samLoaderPipe", "display_name": "SAMLoader (Pipe)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Fix", "output_node": false, "has_intermediate_output": 
false, "search_aliases": []}, "easy detailerFix": {"input": {"required": {"pipe": ["PIPE_LINE"], "image_output": [["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"model": ["MODEL"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "image_output", "link_id", "save_prefix"], "optional": ["model"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, true, true], "output_name": ["pipe", "image", "cropped_refined", "cropped_enhanced_alpha"], "name": "easy detailerFix", "display_name": "DetailerFix", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Fix", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy pipeIn": {"input": {"required": {}, "optional": {"pipe": ["PIPE_LINE"], "model": ["MODEL"], "pos": ["CONDITIONING"], "neg": ["CONDITIONING"], "latent": ["LATENT"], "vae": ["VAE"], "clip": ["CLIP"], "image": ["IMAGE"], "xyPlot": ["XYPLOT"]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": [], "optional": ["pipe", "model", "pos", "neg", "latent", "vae", "clip", "image", "xyPlot"], "hidden": ["my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy pipeIn", "display_name": "Pipe In", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pipeOut": {"input": {"required": {"pipe": ["PIPE_LINE"]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe"], "hidden": 
["my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["pipe", "model", "pos", "neg", "latent", "vae", "clip", "image", "seed"], "name": "easy pipeOut", "display_name": "Pipe Out", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pipeEdit": {"input": {"required": {"clip_skip": ["INT", {"default": -1, "min": -24, "max": 0, "step": 1}], "optional_positive": ["STRING", {"default": "", "multiline": true}], "positive_token_normalization": [["none", "mean", "length", "length+mean"]], "positive_weight_interpretation": [["comfy", "A1111", "comfy++", "compel", "fixed attention"]], "optional_negative": ["STRING", {"default": "", "multiline": true}], "negative_token_normalization": [["none", "mean", "length", "length+mean"]], "negative_weight_interpretation": [["comfy", "A1111", "comfy++", "compel", "fixed attention"]], "a1111_prompt_style": ["BOOLEAN", {"default": false}], "conditioning_mode": [["replace", "concat", "combine", "average", "timestep"], {"default": "replace"}], "average_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "old_cond_start": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "old_cond_end": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}], "new_cond_start": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "new_cond_end": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"pipe": ["PIPE_LINE"], "model": ["MODEL"], "pos": ["CONDITIONING"], "neg": ["CONDITIONING"], "latent": ["LATENT"], "vae": ["VAE"], "clip": ["CLIP"], "image": ["IMAGE"]}, "hidden": {"my_unique_id": "UNIQUE_ID", "prompt": "PROMPT"}}, "input_order": 
{"required": ["clip_skip", "optional_positive", "positive_token_normalization", "positive_weight_interpretation", "optional_negative", "negative_token_normalization", "negative_weight_interpretation", "a1111_prompt_style", "conditioning_mode", "average_strength", "old_cond_start", "old_cond_end", "new_cond_start", "new_cond_end"], "optional": ["pipe", "model", "pos", "neg", "latent", "vae", "clip", "image"], "hidden": ["my_unique_id", "prompt"]}, "is_input_list": false, "output": ["PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE"], "output_is_list": [false, false, false, false, false, false, false, false], "output_name": ["pipe", "model", "pos", "neg", "latent", "vae", "clip", "image"], "name": "easy pipeEdit", "display_name": "Pipe Edit", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pipeEditPrompt": {"input": {"required": {"pipe": ["PIPE_LINE"], "positive": ["STRING", {"default": "", "multiline": true}], "negative": ["STRING", {"default": "", "multiline": true}]}, "hidden": {"my_unique_id": "UNIQUE_ID", "prompt": "PROMPT"}}, "input_order": {"required": ["pipe", "positive", "negative"], "hidden": ["my_unique_id", "prompt"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy pipeEditPrompt", "display_name": "Pipe Edit Prompt", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pipeToBasicPipe": {"input": {"required": {"pipe": ["PIPE_LINE"]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe"], "hidden": ["my_unique_id"]}, "is_input_list": false, "output": ["BASIC_PIPE"], "output_is_list": [false], "output_name": ["basic_pipe"], "name": "easy pipeToBasicPipe", "display_name": 
"Pipe -> BasicPipe", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy pipeBatchIndex": {"input": {"required": {"pipe": ["PIPE_LINE"], "batch_index": ["INT", {"default": 0, "min": 0, "max": 63}], "length": ["INT", {"default": 1, "min": 1, "max": 64}]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "batch_index", "length"], "hidden": ["my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy pipeBatchIndex", "display_name": "Pipe Batch Index", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYPlot": {"input": {"required": {"grid_spacing": ["INT", {"min": 0, "max": 500, "step": 5, "default": 0}], "output_individuals": [["False", "True"], {"default": "False"}], "flip_xy": [["False", "True"], {"default": "False"}], "x_axis": [["None", "---------------------", "preSampling: steps", "preSampling: cfg", "preSampling: sampler_name", "preSampling: scheduler", "preSampling: denoise", "preSampling: seed", "---------------------", "loader: ckpt_name", "loader: vae_name", "loader: clip_skip", "loader: lora_name", "loader: lora_model_strength", "loader: lora_clip_strength", "loader: positive", "loader: negative"], {"default": "None"}], "x_values": ["STRING", {"default": "", "multiline": true, "placeholder": "insert values seperated by \"; \""}], "y_axis": [["None", "---------------------", "preSampling: steps", "preSampling: cfg", "preSampling: sampler_name", "preSampling: scheduler", "preSampling: denoise", "preSampling: seed", "---------------------", "loader: ckpt_name", "loader: vae_name", "loader: clip_skip", "loader: lora_name", "loader: lora_model_strength", "loader: lora_clip_strength", "loader: 
positive", "loader: negative"], {"default": "None"}], "y_values": ["STRING", {"default": "", "multiline": true, "placeholder": "insert values seperated by \"; \""}]}, "optional": {"pipe": ["PIPE_LINE"]}, "hidden": {"plot_dict": [{"steps": {"min": 1, "max": 100, "step": 1}, "cfg": {"min": 0.0, "max": 100.0, "step": 1.0}, "sampler_name": ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], "scheduler": ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], "denoise": {"min": 0.0, "max": 1.0, "step": 0.01}, "seed": {"min": 0, "max": 1125899906842624}, "ckpt_name": ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", 
"SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", 
"epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", 
"sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", 
"sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], "vae_name": ["Baked-VAE", 
"FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"], "clip_skip": {"min": -24, "max": -1, "step": 1}, "lora_name": ["None", 
"AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", 
"flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", 
"sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", 
"wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], "lora_model_strength": {"min": -4.0, "max": 4.0, "step": 0.01}, "lora_clip_strength": {"min": -4.0, "max": 4.0, "step": 0.01}, "positive": [], "negative": []}]}}, "input_order": {"required": ["grid_spacing", "output_individuals", "flip_xy", "x_axis", "x_values", "y_axis", "y_values"], "optional": ["pipe"], "hidden": ["plot_dict"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy XYPlot", "display_name": "XY Plot", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYPlotAdvanced": {"input": {"required": {"pipe": ["PIPE_LINE"], "grid_spacing": ["INT", {"min": 0, "max": 500, "step": 5, "default": 0}], "output_individuals": [["False", "True"], {"default": "False"}], "flip_xy": [["False", "True"], {"default": "False"}]}, "optional": {"X": ["X_Y"], "Y": ["X_Y"], "font": [["None"]]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "grid_spacing", "output_individuals", "flip_xy"], "optional": ["X", "Y", "font"], "hidden": ["my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE"], "output_is_list": [false], "output_name": ["pipe"], "name": "easy XYPlotAdvanced", "display_name": "XY Plot Advanced", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Seeds++ Batch": 
{"input": {"required": {"batch_count": ["INT", {"default": 3, "min": 1, "max": 50}]}}, "input_order": {"required": ["batch_count"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Seeds++ Batch", "display_name": "XY Inputs: Seeds++ Batch //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Steps": {"input": {"required": {"target_parameter": [["steps", "start_at_step", "end_at_step"]], "batch_count": ["INT", {"default": 3, "min": 0, "max": 50}], "first_step": ["INT", {"default": 10, "min": 1, "max": 10000}], "last_step": ["INT", {"default": 20, "min": 1, "max": 10000}], "first_start_step": ["INT", {"default": 0, "min": 0, "max": 10000}], "last_start_step": ["INT", {"default": 10, "min": 0, "max": 10000}], "first_end_step": ["INT", {"default": 10, "min": 0, "max": 10000}], "last_end_step": ["INT", {"default": 20, "min": 0, "max": 10000}]}}, "input_order": {"required": ["target_parameter", "batch_count", "first_step", "last_step", "first_start_step", "last_start_step", "first_end_step", "last_end_step"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Steps", "display_name": "XY Inputs: Steps //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: CFG Scale": {"input": {"required": {"batch_count": ["INT", {"default": 3, "min": 0, "max": 50}], "first_cfg": ["FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0}], "last_cfg": ["FLOAT", {"default": 9.0, "min": 0.0, "max": 100.0}]}}, "input_order": {"required": ["batch_count", "first_cfg", "last_cfg"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], 
"output_name": ["X or Y"], "name": "easy XYInputs: CFG Scale", "display_name": "XY Inputs: CFG Scale //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: FluxGuidance": {"input": {"required": {"batch_count": ["INT", {"default": 3, "min": 0, "max": 50}], "first_guidance": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}], "last_guidance": ["FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0}]}}, "input_order": {"required": ["batch_count", "first_guidance", "last_guidance"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: FluxGuidance", "display_name": "XY Inputs: Flux Guidance //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Sampler/Scheduler": {"input": {"required": {"target_parameter": [["sampler", "scheduler", "sampler & scheduler"]], "input_count": ["INT", {"default": 1, "min": 1, "max": 30, "step": 1}], "sampler_1": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_1": [["None", "simple", "sgm_uniform", 
"karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_2": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_2": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_3": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_3": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_4": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", 
"exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_4": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_5": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_5": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_6": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", 
"dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_6": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_7": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_7": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_8": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", 
"gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_8": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_9": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_9": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_10": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_10": [["None", "simple", "sgm_uniform", "karras", "exponential", 
"ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_11": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_11": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_12": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_12": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_13": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", 
"dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_13": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_14": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_14": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_15": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", 
"dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_15": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_16": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_16": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_17": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", 
"gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_17": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_18": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_18": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_19": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_19": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", 
"normal", "linear_quadratic", "kl_optimal"]], "sampler_20": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_20": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_21": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_21": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_22": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", 
"lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_22": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_23": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_23": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_24": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", 
"dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_24": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_25": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_25": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_26": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", 
"gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_26": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_27": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_27": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_28": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_28": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", 
"normal", "linear_quadratic", "kl_optimal"]], "sampler_29": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_29": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "sampler_30": [["None", "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler_30": [["None", "simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]]}}, "input_order": {"required": ["target_parameter", "input_count", "sampler_1", "scheduler_1", "sampler_2", "scheduler_2", "sampler_3", "scheduler_3", "sampler_4", "scheduler_4", 
"sampler_5", "scheduler_5", "sampler_6", "scheduler_6", "sampler_7", "scheduler_7", "sampler_8", "scheduler_8", "sampler_9", "scheduler_9", "sampler_10", "scheduler_10", "sampler_11", "scheduler_11", "sampler_12", "scheduler_12", "sampler_13", "scheduler_13", "sampler_14", "scheduler_14", "sampler_15", "scheduler_15", "sampler_16", "scheduler_16", "sampler_17", "scheduler_17", "sampler_18", "scheduler_18", "sampler_19", "scheduler_19", "sampler_20", "scheduler_20", "sampler_21", "scheduler_21", "sampler_22", "scheduler_22", "sampler_23", "scheduler_23", "sampler_24", "scheduler_24", "sampler_25", "scheduler_25", "sampler_26", "scheduler_26", "sampler_27", "scheduler_27", "sampler_28", "scheduler_28", "sampler_29", "scheduler_29", "sampler_30", "scheduler_30"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Sampler/Scheduler", "display_name": "XY Inputs: Sampler/Scheduler //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Denoise": {"input": {"required": {"batch_count": ["INT", {"default": 3, "min": 0, "max": 50}], "first_denoise": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1}], "last_denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}]}}, "input_order": {"required": ["batch_count", "first_denoise", "last_denoise"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Denoise", "display_name": "XY Inputs: Denoise //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Checkpoint": {"input": {"required": {"input_mode": [["Ckpt Names", "Ckpt Names+ClipSkip", "Ckpt 
Names+ClipSkip+VAE"]], "ckpt_count": ["INT", {"default": 3, "min": 0, "max": 10, "step": 1}], "ckpt_name_1": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", 
"cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", 
"meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", 
"sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", 
"sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_1": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_1": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", 
"qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_2": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", 
"flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", 
"sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", 
"sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_2": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_2": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", 
"ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_3": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", 
"SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", 
"dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", 
"sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", 
"sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", 
"v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_3": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_3": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", 
"wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_4": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", 
"cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", 
"ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", 
"sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", 
"sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_4": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_4": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", 
"mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_5": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", 
"sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", 
"sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_5": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_5": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", 
"Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_6": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", 
"MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", 
"dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", 
"realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", 
"sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", 
"svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_6": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_6": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", 
"sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_7": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", 
"cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", 
"juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", 
"sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", 
"sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_7": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_7": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", 
"hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_8": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", 
"epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", 
"sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", 
"sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_8": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_8": [["Baked VAE", "FLUX1/ae.safetensors", 
"SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_9": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", 
"512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", 
"cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", 
"ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", 
"sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", 
"sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_9": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_9": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", 
"sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]], "ckpt_name_10": [["None", "3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", 
"hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", 
"sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", 
"sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "clip_skip_10": ["INT", {"default": -1, "min": -24, "max": -1, "step": 1}], "vae_name_10": [["Baked VAE", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", 
"flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"]]}, "optional": {"optional_lora_stack": ["LORA_STACK"]}}, "input_order": {"required": ["input_mode", "ckpt_count", "ckpt_name_1", "clip_skip_1", "vae_name_1", "ckpt_name_2", "clip_skip_2", "vae_name_2", "ckpt_name_3", "clip_skip_3", "vae_name_3", "ckpt_name_4", "clip_skip_4", "vae_name_4", "ckpt_name_5", "clip_skip_5", "vae_name_5", "ckpt_name_6", "clip_skip_6", "vae_name_6", "ckpt_name_7", "clip_skip_7", "vae_name_7", "ckpt_name_8", "clip_skip_8", "vae_name_8", "ckpt_name_9", "clip_skip_9", "vae_name_9", "ckpt_name_10", "clip_skip_10", "vae_name_10"], "optional": ["optional_lora_stack"]}, "is_input_list": 
false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Checkpoint", "display_name": "XY Inputs: Checkpoint //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: Lora": {"input": {"required": {"input_mode": [["Lora Names", "Lora Names+Weights"]], "lora_count": ["INT", {"default": 3, "min": 0, "max": 10, "step": 1}], "model_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_1": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", 
"detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", 
"kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", 
"sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_1": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_1": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_2": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", 
"Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", 
"gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", 
"sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_2": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_2": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_3": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", 
"Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", 
"flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", 
"sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_3": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_3": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_4": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", 
"flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", 
"kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", 
"sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_4": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_4": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_5": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", 
"WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", 
"hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", 
"sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_5": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_5": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_6": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", 
"OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", 
"flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", 
"sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_6": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_6": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_7": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", 
"flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", 
"kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", 
"sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_7": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_7": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_8": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", 
"aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_8": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_8": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_9": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", 
"Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", 
"flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", 
"sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_9": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 
10.0, "step": 0.01}], "clip_str_9": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_name_10": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", 
"flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", 
"ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", 
"sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "model_str_10": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "clip_str_10": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}, "optional": {"optional_lora_stack": ["LORA_STACK"], "display_trigger_word": ["BOOLEAN", {"display_trigger_word": true, "tooltip": "Trigger words showing lora model pass through the model's metadata, but not necessarily accurately."}]}}, "input_order": {"required": ["input_mode", "lora_count", "model_strength", "clip_strength", "lora_name_1", "model_str_1", "clip_str_1", "lora_name_2", "model_str_2", "clip_str_2", "lora_name_3", "model_str_3", "clip_str_3", "lora_name_4", "model_str_4", "clip_str_4", "lora_name_5", "model_str_5", "clip_str_5", "lora_name_6", "model_str_6", "clip_str_6", "lora_name_7", "model_str_7", "clip_str_7", "lora_name_8", "model_str_8", "clip_str_8", "lora_name_9", "model_str_9", "clip_str_9", "lora_name_10", "model_str_10", "clip_str_10"], "optional": ["optional_lora_stack", "display_trigger_word"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: Lora", "display_name": "XY Inputs: Lora //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: ModelMergeBlocks": {"input": {"required": {"ckpt_name_1": [["3D_Anime.safetensors", 
"3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", 
"cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", 
"ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", 
"sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", 
"sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "ckpt_name_2": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", 
"TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", 
"hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", 
"sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", 
"sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]], "vae_use": [["Use Model 1", "Use Model 2", "FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", 
"flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors"], {"default": "Use Model 1"}], "preset": [["Preset", "MMB-ALL:1,1,1", "MMB-IN:1,0,0", "MMB-MID:0,1,0", "MMB-OUT:0,0,1", "MMB-INMID:1,1,0", "MMB-INOUT:1,0,1", "MMB-MIDOUT:0,1,1", "MMB-NONE:0,0,0", "@MMBN-FULL-TEST:27"], {"default": "preset"}], "values": ["STRING", {"default": "1,0,0; \n0,1,0; \n0,0,1; \n1,1,0; \n1,0,1; \n0,1,1; ", "multiline": true, "placeholder": "Support 2 methods:\n\n1.input, middle, out in same line and insert values seperated by \"; \"\n\n2.model merge block number seperated by \", \" in same line and insert values seperated by \"; \""}]}, "hidden": {"my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["ckpt_name_1", "ckpt_name_2", "vae_use", 
"preset", "values"], "hidden": ["my_unique_id"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: ModelMergeBlocks", "display_name": "XY Inputs: ModelMergeBlocks //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: PromptSR": {"input": {"required": {"target_prompt": [["positive", "negative"]], "search_txt": ["STRING", {"default": "", "multiline": false}], "replace_all_text": ["BOOLEAN", {"default": false}], "replace_count": ["INT", {"default": 3, "min": 1, "max": 29}], "replace_1": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_1"}], "replace_2": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_2"}], "replace_3": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_3"}], "replace_4": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_4"}], "replace_5": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_5"}], "replace_6": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_6"}], "replace_7": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_7"}], "replace_8": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_8"}], "replace_9": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_9"}], "replace_10": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_10"}], "replace_11": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_11"}], "replace_12": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_12"}], "replace_13": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_13"}], "replace_14": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_14"}], "replace_15": ["STRING", 
{"default": "", "multiline": false, "placeholder": "replace_15"}], "replace_16": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_16"}], "replace_17": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_17"}], "replace_18": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_18"}], "replace_19": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_19"}], "replace_20": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_20"}], "replace_21": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_21"}], "replace_22": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_22"}], "replace_23": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_23"}], "replace_24": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_24"}], "replace_25": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_25"}], "replace_26": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_26"}], "replace_27": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_27"}], "replace_28": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_28"}], "replace_29": ["STRING", {"default": "", "multiline": false, "placeholder": "replace_29"}]}}, "input_order": {"required": ["target_prompt", "search_txt", "replace_all_text", "replace_count", "replace_1", "replace_2", "replace_3", "replace_4", "replace_5", "replace_6", "replace_7", "replace_8", "replace_9", "replace_10", "replace_11", "replace_12", "replace_13", "replace_14", "replace_15", "replace_16", "replace_17", "replace_18", "replace_19", "replace_20", "replace_21", "replace_22", "replace_23", "replace_24", "replace_25", "replace_26", "replace_27", "replace_28", "replace_29"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: PromptSR", 
"display_name": "XY Inputs: PromptSR //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: ControlNet": {"input": {"required": {"control_net_name": [["FLUX.1-dev-ControlNet-Union-Pro-2.0.safetensors", "FLUX.1/InstantX-FLUX1-Dev-Union/diffusion_pytorch_model.safetensors", "FLUX.1/Shakker-Labs-ControlNet-Union-Pro/diffusion_pytorch_model.safetensors", "FLUX.1/flux_shakker_labs_union_pro-fp8_e4m3fn.safetensors", "Flux.1-dev-Controlnet-Upscaler.safetensors", "Qwen-Image-InstantX-ControlNet-Inpainting.safetensors", "Qwen-Image-InstantX-ControlNet-Union.safetensors", "SDXL/OpenPoseXL2.safetensors", "SDXL/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "Wan21_Uni3C_controlnet_fp16.safetensors", "animatediff/animatediffControlnet_sd15FP32.safetensors", "animatediff/v3_sd15_sparsectrl_rgb.ckpt", "animatediff/v3_sd15_sparsectrl_scribble.ckpt", "coadapter-canny-sd15v1.safetensors", "coadapter-color-sd15v1.safetensors", "coadapter-depth-sd15v1.safetensors", "coadapter-fuser-sd15v1.safetensors", "coadapter-sketch-sd15v1.safetensors", "coadapter-style-sd15v1.safetensors", "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", "control_v11f1p_sd15_depth_fp16.safetensors", "control_v11p_sd15_canny.pth", "control_v11p_sd15_openpose.pth", "control_v11p_sd15_openpose_fp16.safetensors", "control_v11p_sd15_scribble_fp16.safetensors", "flux/flux-canny-controlnet-v3.safetensors", "flux/flux-canny-controlnet.safetensors", "flux/flux-canny-controlnet_v2.safetensors", "flux/flux-depth-controlnet-v3.safetensors", "flux/flux-depth-controlnet.safetensors", "flux/flux-depth-controlnet_v2.safetensors", "flux/flux-hed-controlnet.safetensors", 
"flux/flux.1-dev-controlnet-union/diffusion_pytorch_model.safetensors", "instantid/diffusion_pytorch_model.safetensors", "sd1/coadapter-canny-sd15v1.pth", "sd1/coadapter-color-sd15v1.pth", "sd1/coadapter-depth-sd15v1.pth", "sd1/coadapter-fuser-sd15v1.pth", "sd1/coadapter-sketch-sd15v1.pth", "sd1/coadapter-style-sd15v1.pth", "sd1/control_sd15_inpaint_depth_hand_fp16.safetensors", "sd1/control_v11e_sd15_ip2p.pth", "sd1/control_v11e_sd15_shuffle.pth", "sd1/control_v11f1e_sd15_tile.pth", "sd1/control_v11f1p_sd15_depth.pth", "sd1/control_v11p_sd15_canny.pth", "sd1/control_v11p_sd15_inpaint.pth", "sd1/control_v11p_sd15_lineart.pth", "sd1/control_v11p_sd15_mlsd.pth", "sd1/control_v11p_sd15_normalbae.pth", "sd1/control_v11p_sd15_openpose.pth", "sd1/control_v11p_sd15_scribble.pth", "sd1/control_v11p_sd15_seg.pth", "sd1/control_v11p_sd15_softedge.pth", "sd1/control_v11p_sd15s2_lineart_anime.pth", "sd1/control_v1p_sd15_qrcode_monster.safetensors", "sd1/controlnet_checkpoint.ckpt", "sd1/diff_control_sd15_canny_fp16.safetensors", "sd1/diff_control_sd15_depth_fp16.safetensors", "sd1/diff_control_sd15_hed_fp16.safetensors", "sd1/diff_control_sd15_mlsd_fp16.safetensors", "sd1/diff_control_sd15_normal_fp16.safetensors", "sd1/diff_control_sd15_openpose_fp16.safetensors", "sd1/diff_control_sd15_scribble_fp16.safetensors", "sd1/diff_control_sd15_seg_fp16.safetensors", "sd1/ioclab_sd15_recolor.safetensors", "sd1/lightingBasedPicture_v10.safetensors", "sd1/t2iadapter_canny_sd14v1.pth", "sd1/t2iadapter_canny_sd15v2.pth", "sd1/t2iadapter_color_sd14v1.pth", "sd1/t2iadapter_depth_sd14v1.pth", "sd1/t2iadapter_depth_sd15v2.pth", "sd1/t2iadapter_keypose_sd14v1.pth", "sd1/t2iadapter_openpose_sd14v1.pth", "sd1/t2iadapter_seg_sd14v1.pth", "sd1/t2iadapter_sketch_sd14v1.pth", "sd1/t2iadapter_sketch_sd15v2.pth", "sd1/t2iadapter_style_sd14v1.pth", "sd1/t2iadapter_zoedepth_sd15v1.pth", "sd3.5_large_controlnet_blur.safetensors", "sd3.5_large_controlnet_canny.safetensors", 
"sd3.5_large_controlnet_depth.safetensors", "sd35/sd3.5_large_controlnet_blur.safetensors", "sd35/sd3.5_large_controlnet_canny.safetensors", "sd35/sd3.5_large_controlnet_depth.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v1_fp32.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_fp16.safetensors", "sdxl/TTPLANET_Controlnet_Tile_realistic_v2_rank256.safetensors", "sdxl/control-LoRAs-rank128/control-lora-canny-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-depth-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors", "sdxl/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors", "sdxl/control-LoRAs-rank256/control-lora-canny-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-depth-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors", "sdxl/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model.safetensors", "sdxl/controlnet-union-sdxl-1.0/diffusion_pytorch_model_promax.safetensors", "sdxl/depth-zoe-xl-v1.0-controlnet.safetensors", "sdxl/diffusers_xl_canny_full.safetensors", "sdxl/diffusers_xl_canny_mid.safetensors", "sdxl/diffusers_xl_canny_small.safetensors", "sdxl/diffusers_xl_depth_full.safetensors", "sdxl/diffusers_xl_depth_mid.safetensors", "sdxl/diffusers_xl_depth_small.safetensors", "sdxl/mistoLine_fp16.safetensors", "sdxl/mistoLine_rank256.safetensors", "sdxl/sai_xl_canny_128lora.safetensors", "sdxl/sai_xl_canny_256lora.safetensors", "sdxl/sai_xl_depth_128lora.safetensors", "sdxl/sai_xl_depth_256lora.safetensors", "sdxl/sai_xl_recolor_128lora.safetensors", "sdxl/sai_xl_recolor_256lora.safetensors", "sdxl/sai_xl_sketch_128lora.safetensors", "sdxl/sai_xl_sketch_256lora.safetensors", "sdxl/sargezt_xl_depth.safetensors", "sdxl/sargezt_xl_depth_faid_vidit.safetensors", 
"sdxl/sargezt_xl_depth_zeed.safetensors", "sdxl/sargezt_xl_softedge.safetensors", "sdxl/t2i-adapter_diffusers_xl_canny.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_midas.safetensors", "sdxl/t2i-adapter_diffusers_xl_depth_zoe.safetensors", "sdxl/t2i-adapter_diffusers_xl_lineart.safetensors", "sdxl/t2i-adapter_diffusers_xl_openpose.safetensors", "sdxl/t2i-adapter_diffusers_xl_sketch.safetensors", "sdxl/t2i-adapter_xl_canny.safetensors", "sdxl/t2i-adapter_xl_openpose.safetensors", "sdxl/t2i-adapter_xl_sketch.safetensors", "sdxl/thibaud_xl_openpose.safetensors", "sdxl/thibaud_xl_openpose_256lora.safetensors", "sdxl/xinsir_depth.safetensors", "t2iadapter_canny_sd14v1.safetensors", "t2iadapter_canny_sd15v2.safetensors", "t2iadapter_color_sd14v1.safetensors", "t2iadapter_depth_sd14v1.safetensors", "t2iadapter_depth_sd15v2.safetensors", "t2iadapter_keypose_sd14v1.safetensors", "t2iadapter_openpose_sd14v1.safetensors", "t2iadapter_seg_sd14v1.safetensors", "t2iadapter_sketch_sd14v1.safetensors", "t2iadapter_sketch_sd15v2.safetensors", "t2iadapter_style_sd14v1.safetensors", "t2iadapter_zoedepth_sd15v1.safetensors"]], "image": ["IMAGE"], "target_parameter": [["strength", "start_percent", "end_percent"]], "batch_count": ["INT", {"default": 3, "min": 1, "max": 30}], "first_strength": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}], "last_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "first_start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "last_start_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "first_end_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "last_end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "end_percent": ["FLOAT", 
{"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"control_net": ["CONTROL_NET"]}}, "input_order": {"required": ["control_net_name", "image", "target_parameter", "batch_count", "first_strength", "last_strength", "first_start_percent", "last_start_percent", "first_end_percent", "last_end_percent", "strength", "start_percent", "end_percent"], "optional": ["control_net"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: ControlNet", "display_name": "XY Inputs: ControlNet //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: PositiveCond": {"input": {"optional": {"positive_1": ["CONDITIONING"], "positive_2": ["CONDITIONING"], "positive_3": ["CONDITIONING"], "positive_4": ["CONDITIONING"]}}, "input_order": {"optional": ["positive_1", "positive_2", "positive_3", "positive_4"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: PositiveCond", "display_name": "XY Inputs: PosCond //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: PositiveCondList": {"input": {"required": {"positive": ["CONDITIONING"]}}, "input_order": {"required": ["positive"]}, "is_input_list": true, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: PositiveCondList", "display_name": "XY Inputs: PosCondList //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: NegativeCond": {"input": {"optional": {"negative_1": ["CONDITIONING"], 
"negative_2": ["CONDITIONING"], "negative_3": ["CONDITIONING"], "negative_4": ["CONDITIONING"]}}, "input_order": {"optional": ["negative_1", "negative_2", "negative_3", "negative_4"]}, "is_input_list": false, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: NegativeCond", "display_name": "XY Inputs: NegCond //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy XYInputs: NegativeCondList": {"input": {"required": {"negative": ["CONDITIONING"]}}, "input_order": {"required": ["negative"]}, "is_input_list": true, "output": ["X_Y"], "output_is_list": [false], "output_name": ["X or Y"], "name": "easy XYInputs: NegativeCondList", "display_name": "XY Inputs: NegCondList //EasyUse", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/XY Inputs", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageInsetCrop": {"input": {"required": {"image": ["IMAGE"], "measurement": [["Pixels", "Percentage"]], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["image", "measurement", "left", "right", "top", "bottom"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageInsetCrop", "display_name": "ImageInsetCrop", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageCount": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": 
false, "output": ["INT"], "output_is_list": [false], "output_name": ["count"], "name": "easy imageCount", "display_name": "ImageCount", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imagesCountInDirectory": {"input": {"required": {"directory": ["STRING"], "start_index": ["INT", {"default": 0, "min": 0, "step": 1}], "limit": ["INT", {"default": -1, "min": -1, "max": 10000}]}}, "input_order": {"required": ["directory", "start_index", "limit"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["count"], "name": "easy imagesCountInDirectory", "display_name": "imagesCountInDirectory", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageSize": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["width_int", "height_int"], "name": "easy imageSize", "display_name": "ImageSize", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageSizeBySide": {"input": {"required": {"image": ["IMAGE"], "side": [["Longest", "Shortest"]]}}, "input_order": {"required": ["image", "side"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["resolution"], "name": "easy imageSizeBySide", "display_name": "ImageSize (Side)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageSizeByLongerSide": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": 
{"required": ["image"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["resolution"], "name": "easy imageSizeByLongerSide", "display_name": "ImageSize (LongerSide)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imagePixelPerfect": {"input": {"required": {"image": ["IMAGE"], "resize_mode": [["Just Resize", "Crop and Resize", "Resize and Fill"], {"default": "Just Resize"}]}}, "input_order": {"required": ["image", "resize_mode"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["resolution"], "name": "easy imagePixelPerfect", "display_name": "ImagePixelPerfect", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageScaleDown": {"input": {"required": {"images": ["IMAGE"], "width": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["images", "width", "height", "crop"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageScaleDown", "display_name": "Image Scale Down", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageScaleDownBy": {"input": {"required": {"images": ["IMAGE"], "scale_by": ["FLOAT", {"default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["images", "scale_by"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageScaleDownBy", "display_name": "Image Scale Down By", 
"description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageScaleDownToSize": {"input": {"required": {"images": ["IMAGE"], "size": ["INT", {"default": 512, "min": 1, "max": 16384, "step": 1}], "mode": ["BOOLEAN", {"default": true, "label_on": "max", "label_off": "min"}]}}, "input_order": {"required": ["images", "size", "mode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageScaleDownToSize", "display_name": "Image Scale Down To Size", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageScaleToNormPixels": {"input": {"required": {"image": ["IMAGE"], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}]}}, "input_order": {"required": ["image", "upscale_method", "scale_by"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy imageScaleToNormPixels", "display_name": "ImageScaleToNormPixels", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageRatio": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["INT", "INT", "FLOAT", "FLOAT"], "output_is_list": [false, false, false, false], "output_name": ["width_ratio_int", "height_ratio_int", "width_ratio_float", "height_ratio_float"], "name": "easy imageRatio", "display_name": "ImageRatio", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, 
"has_intermediate_output": false, "search_aliases": []}, "easy imageConcat": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"], "direction": [["right", "down", "left", "up"], {"default": "right"}], "match_image_size": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["image1", "image2", "direction", "match_image_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageConcat", "display_name": "imageConcat", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageListToImageBatch": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "easy imageListToImageBatch", "display_name": "Image List To Image Batch", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageBatchToImageList": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "easy imageBatchToImageList", "display_name": "Image Batch To Image List", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageSplitList": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["images", "images", "images"], "name": "easy imageSplitList", "display_name": "imageSplitList", "description": "", "python_module": 
"custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageSplitGrid": {"input": {"required": {"images": ["IMAGE"], "row": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}], "column": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}}, "input_order": {"required": ["images", "row", "column"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "easy imageSplitGrid", "display_name": "imageSplitGrid", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imagesSplitImage": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false, false, false], "output_name": ["image1", "image2", "image3", "image4", "image5"], "name": "easy imagesSplitImage", "display_name": "imagesSplitImage", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageSplitTiles": {"input": {"required": {"image": ["IMAGE"], "overlap_ratio": ["FLOAT", {"default": 0, "min": 0, "max": 0.5, "step": 0.01}], "overlap_offset": ["INT", {"default": 0, "min": -8192, "max": 8192, "step": 1}], "tiles_rows": ["INT", {"default": 2, "min": 1, "max": 50, "step": 1}], "tiles_cols": ["INT", {"default": 2, "min": 1, "max": 50, "step": 1}]}, "optional": {"norm": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image", "overlap_ratio", "overlap_offset", "tiles_rows", "tiles_cols"], "optional": ["norm"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "OVERLAP", "INT"], "output_is_list": [false, false, false, false], "output_name": ["tiles", 
"masks", "overlap", "total"], "name": "easy imageSplitTiles", "display_name": "imageSplitTiles", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageTilesFromBatch": {"input": {"required": {"tiles": ["IMAGE"], "masks": ["MASK"], "overlap": ["OVERLAP"], "index": ["INT", {"default": 0, "min": 0, "max": 10000, "step": 1}]}}, "input_order": {"required": ["tiles", "masks", "overlap", "index"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["image", "mask", "x", "y"], "name": "easy imageTilesFromBatch", "display_name": "imageTilesFromBatch", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageCropFromMask": {"input": {"required": {"image": ["IMAGE"], "mask": ["MASK"], "image_crop_multi": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "mask_crop_multi": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "bbox_smooth_alpha": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["image", "mask", "image_crop_multi", "mask_crop_multi", "bbox_smooth_alpha"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "BBOX"], "output_is_list": [false, false, false], "output_name": ["crop_image", "crop_mask", "bbox"], "name": "easy imageCropFromMask", "display_name": "imageCropFromMask", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageUncropFromBBOX": {"input": {"required": {"original_image": ["IMAGE"], "crop_image": ["IMAGE"], "bbox": ["BBOX"], "border_blending": ["FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, 
"step": 0.01}], "use_square_mask": ["BOOLEAN", {"default": true}]}, "optional": {"optional_mask": ["MASK"]}}, "input_order": {"required": ["original_image", "crop_image", "bbox", "border_blending", "use_square_mask"], "optional": ["optional_mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy imageUncropFromBBOX", "display_name": "imageUncropFromBBOX", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy imageSave": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "only_preview": ["BOOLEAN", {"default": false}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "only_preview"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "easy imageSave", "display_name": "Save Image (Simple)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageRemBg": {"input": {"required": {"images": ["IMAGE"], "rem_mode": [["RMBG-2.0", "RMBG-1.4", "Inspyrenet", "BEN2"], {"default": "RMBG-1.4"}], "image_output": [["Hide", "Preview", "Save", "Hide/Save"], {"default": "Preview"}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"torchscript_jit": ["BOOLEAN", {"default": false}], "add_background": [["none", "white", "black"], {"default": "none"}], "refine_foreground": ["BOOLEAN", {"default": false}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "rem_mode", "image_output", "save_prefix"], "optional": ["torchscript_jit", "add_background", "refine_foreground"], "hidden": ["prompt", 
"extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["image", "mask"], "name": "easy imageRemBg", "display_name": "Image Remove Bg", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageChooser": {"input": {"required": {"mode": [["Always Pause", "Keep Last Selection"], {"default": "Always Pause"}], "preview_rescale": ["FLOAT", {"default": 1.0, "min": 0.05, "max": 1.0, "step": 0.05}]}, "optional": {"images": ["IMAGE"]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["mode", "preview_rescale"], "optional": ["images"], "hidden": ["prompt", "my_unique_id", "extra_pnginfo"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy imageChooser", "display_name": "Image Chooser", "description": "Pauses the workflow so you can select which of the input images to pass through.", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "easy imageColorMatch": {"input": {"required": {"image_ref": ["IMAGE"], "image_target": ["IMAGE"], "method": [["wavelet", "adain", "mkl", "hm", "reinhard", "mvgd", "hm-mvgd-hm", "hm-mkl-hm"]], "image_output": [["Hide", "Preview", "Save", "Hide/Save"], {"default": "Preview"}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image_ref", "image_target", "method", "image_output", "save_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": 
["image"], "name": "easy imageColorMatch", "display_name": "Image Color Match", "description": "Matches the color distribution of the target image to the reference image.", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "easy imageDetailTransfer": {"input": {"required": {"target": ["IMAGE"], "source": ["IMAGE"], "mode": [["add", "multiply", "screen", "overlay", "soft_light", "hard_light", "color_dodge", "color_burn", "difference", "exclusion", "divide"], {"default": "add"}], "blur_sigma": ["FLOAT", {"default": 1.0, "min": 0.1, "max": 100.0, "step": 0.01}], "blend_factor": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001, "round": 0.001}], "image_output": [["Hide", "Preview", "Save", "Hide/Save"], {"default": "Preview"}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {"mask": ["MASK"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["target", "source", "mode", "blur_sigma", "blend_factor", "image_output", "save_prefix"], "optional": ["mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy imageDetailTransfer", "display_name": "Image Detail Transfer", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageInterrogator": {"input": {"required": {"image": ["IMAGE"], "mode": [["fast", "classic", "best", "negative"]], "use_lowvram": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image", "mode", "use_lowvram"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [true], "output_name": ["prompt"], "name": "easy 
imageInterrogator", "display_name": "Image To Prompt", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loadImagesForLoop": {"input": {"required": {"directory": ["STRING", {"default": ""}]}, "optional": {"start_index": ["INT", {"default": 0, "min": 0, "step": 1}], "limit": ["INT", {"default": -1, "min": -1, "max": 10000}], "initial_value1": ["*"], "initial_value2": ["*"]}, "hidden": {"initial_value0": ["*"], "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["directory"], "optional": ["start_index", "limit", "initial_value1", "initial_value2"], "hidden": ["initial_value0", "prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["FLOW_CONTROL", "INT", "IMAGE", "MASK", "STRING", "*", "*"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["flow", "index", "image", "mask", "name", "value1", "value2"], "name": "easy loadImagesForLoop", "display_name": "Load Images For Loop", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy loadImageBase64": {"input": {"required": {"base64_data": ["STRING", {"default": ""}], "image_output": [["Hide", "Preview", "Save", "Hide/Save"], {"default": "Preview"}], "save_prefix": ["STRING", {"default": "ComfyUI"}]}, "optional": {}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["base64_data", "image_output", "save_prefix"], "optional": [], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "easy loadImageBase64", "display_name": "Load Image (Base64)", "description": "", "python_module": 
"custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image/LoadImage", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy imageToBase64": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "easy imageToBase64", "display_name": "Image To Base64", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy joinImageBatch": {"input": {"required": {"images": ["IMAGE"], "mode": [["horizontal", "vertical"], {"default": "horizontal"}]}}, "input_order": {"required": ["images", "mode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy joinImageBatch", "display_name": "JoinImageBatch", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy humanSegmentation": {"input": {"required": {"image": ["IMAGE"], "method": [["selfie_multiclass_256x256", "human_parsing_lip", "human_parts (deeplabv3p)", "segformer_b3_clothes", "segformer_b3_fashion", "face_parsing"]], "confidence": ["FLOAT", {"default": 0.4, "min": 0.05, "max": 0.95, "step": 0.01}], "crop_multi": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}], "mask_components": ["EASY_COMBO", {"options": [{"label": "Background", "value": 0}], "multi_select": {"placeholder": "select mask components", "chip": true, "max_selected_labels": 4}}]}, "hidden": {"prompt": "PROMPT", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["image", "method", "confidence", "crop_multi", "mask_components"], "hidden": ["prompt", "my_unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "BBOX"], "output_is_list": [false, false, 
false], "output_name": ["image", "mask", "bbox"], "name": "easy humanSegmentation", "display_name": "Human Segmentation", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Segmentation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy removeLocalImage": {"input": {"required": {"any": ["*"], "file_name": ["STRING", {"default": ""}]}}, "input_order": {"required": ["any", "file_name"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "easy removeLocalImage", "display_name": "Remove Local Image", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "easy makeImageForICLora": {"input": {"required": {"image_1": ["IMAGE"], "direction": [["top-bottom", "left-right"], {"default": "left-right"}], "pixels": ["INT", {"default": 0, "max": 16384, "min": 0, "step": 8, "tooltip": "The pixel of the output image is not set when it is 0"}], "method": [["uniform height", "uniform width", "auto"], {"default": "auto"}]}, "optional": {"image_2": ["IMAGE"], "mask_1": ["MASK"], "mask_2": ["MASK"]}}, "input_order": {"required": ["image_1", "direction", "pixels", "method"], "optional": ["image_2", "mask_1", "mask_2"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "MASK", "INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["image", "mask", "context_mask", "width", "height", "x", "y"], "name": "easy makeImageForICLora", "display_name": "Make Image For ICLora", "description": "make Image for ICLora to Re-paint", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy string": {"input": {"required": {"value": ["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": 
["value"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy string", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy int": {"input": {"required": {"value": ["INT", {"default": 0, "min": -999999, "max": 999999}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["int"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy int", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy rangeInt": {"input": {"required": {"range_mode": ["COMBO", {"default": "step", "multiselect": false, "options": ["step", "num_steps"]}], "start": ["INT", {"default": 0, "min": -4096, "max": 4096, "step": 1}], "stop": ["INT", {"default": 0, "min": -4096, "max": 4096, "step": 1}], "step": ["INT", {"default": 0, "min": -4096, "max": 4096, "step": 1}], "num_steps": ["INT", {"default": 0, "min": -4096, "max": 4096, "step": 1}], "end_mode": ["COMBO", {"default": "Inclusive", "multiselect": false, "options": ["Inclusive", "Exclusive"]}]}}, "input_order": {"required": ["range_mode", "start", "stop", "step", "num_steps", "end_mode"]}, "is_input_list": true, "output": ["INT", "INT"], "output_is_list": [true, true], "output_name": ["range", "range_sizes"], "output_tooltips": [null, null], "output_matchtypes": null, 
"name": "easy rangeInt", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy float": {"input": {"required": {"value": ["FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 0.01}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["float"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy float", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy rangeFloat": {"input": {"required": {"range_mode": ["COMBO", {"default": "step", "multiselect": false, "options": ["step", "num_steps"]}], "start": ["FLOAT", {"default": 0, "min": -4096, "max": 4096, "step": 0.1}], "stop": ["FLOAT", {"default": 0, "min": -4096, "max": 4096, "step": 0.1}], "step": ["FLOAT", {"default": 0, "min": -4096, "max": 4096, "step": 0.1}], "num_steps": ["INT", {"default": 0, "min": -4096, "max": 4096, "step": 1}], "end_mode": ["COMBO", {"default": "Inclusive", "multiselect": false, "options": ["Inclusive", "Exclusive"]}]}}, "input_order": {"required": ["range_mode", "start", "stop", "step", "num_steps", "end_mode"]}, "is_input_list": true, "output": ["FLOAT", "INT"], "output_is_list": [true, true], "output_name": ["range", "range_sizes"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy rangeFloat", "display_name": null, "description": "", "python_module": 
"custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy boolean": {"input": {"required": {"value": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy boolean", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Type", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy mathString": {"input": {"required": {"a": ["STRING", {"multiline": false}], "b": ["STRING", {"multiline": false}], "operation": ["COMBO", {"multiselect": false, "options": ["a == b", "a != b", "a IN b", "a MATCH REGEX(b)", "a BEGINSWITH b", "a ENDSWITH b"]}], "case_sensitive": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["a", "b", "operation", "case_sensitive"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy mathString", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy mathInt": {"input": {"required": {"a": ["INT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 1}], "b": ["INT", {"default": 
0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 1}], "operation": ["COMBO", {"multiselect": false, "options": ["add", "subtract", "multiply", "divide", "modulo", "power"]}]}}, "input_order": {"required": ["a", "b", "operation"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy mathInt", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy mathFloat": {"input": {"required": {"a": ["FLOAT", {"default": 0, "min": -999999999999.0, "max": 999999999999.0, "step": 0.01}], "b": ["FLOAT", {"default": 0, "min": -999999999999.0, "max": 999999999999.0, "step": 0.01}], "operation": ["COMBO", {"multiselect": false, "options": ["add", "subtract", "multiply", "divide", "modulo", "power"]}]}}, "input_order": {"required": ["a", "b", "operation"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy mathFloat", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy simpleMath": {"input": {"required": {"value": ["STRING", {"default": "", "multiline": false, "placeholder": "\u8f93\u5165\u6570\u5b66\u516c\u5f0f\uff0c\u5982: a + b, pow(a, 2), ceil(a / b), floor(a * b), round(a / b, 2)"}]}, "optional": {"a": ["*", {}], "b": ["*", {}], "c": ["*", {}]}}, "input_order": 
{"required": ["value"], "optional": ["a", "b", "c"]}, "is_input_list": false, "output": ["INT", "FLOAT", "BOOLEAN"], "output_is_list": [false, false, false], "output_name": ["int", "float", "boolean"], "output_tooltips": [null, null, null], "output_matchtypes": null, "name": "easy simpleMath", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy simpleMathDual": {"input": {"required": {"value1": ["STRING", {"default": "", "multiline": false, "placeholder": "\u8f93\u5165\u6570\u5b66\u516c\u5f0f1\uff0c\u5982: a + b, pow(a, 2), ceil(a / b)"}], "value2": ["STRING", {"default": "", "multiline": false, "placeholder": "\u8f93\u5165\u6570\u5b66\u516c\u5f0f2\uff0c\u5982: c * d, sqrt(c), floor(d / 2)"}]}, "optional": {"a": ["*", {}], "b": ["*", {}], "c": ["*", {}], "d": ["*", {}]}}, "input_order": {"required": ["value1", "value2"], "optional": ["a", "b", "c", "d"]}, "is_input_list": false, "output": ["INT", "FLOAT", "INT", "FLOAT"], "output_is_list": [false, false, false, false], "output_name": ["int1", "float1", "int2", "float2"], "output_tooltips": [null, null, null, null], "output_matchtypes": null, "name": "easy simpleMathDual", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy compare": {"input": {"required": {}, "optional": {"a": ["*", {}], "b": ["*", {}], "comparison": ["COMBO", {"default": "a == b", "multiselect": false, "options": ["a == b", "a != b", "a < b", "a > b", "a <= b", "a >= b", "a 
> 0", "a <= 0", "b > 0", "b <= 0"]}]}}, "input_order": {"required": [], "optional": ["a", "b", "comparison"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy compare", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Math", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy imageSwitch": {"input": {"required": {"image_a": ["IMAGE", {}], "image_b": ["IMAGE", {}], "boolean": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["image_a", "image_b", "boolean"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy imageSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Switch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy textSwitch": {"input": {"required": {"input": ["INT", {"default": 1, "min": 1, "max": 2}]}, "optional": {"text1": ["STRING", {"forceInput": true, "multiline": false}], "text2": ["STRING", {"forceInput": true, "multiline": false}]}}, "input_order": {"required": ["input"], "optional": ["text1", "text2"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy textSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Switch", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy imageIndexSwitch": {"input": {"required": {"index": ["INT", {"default": 0, "min": 0, "max": 9, "step": 1}]}, "optional": {"image0": ["IMAGE", {"lazy": true}], "image1": ["IMAGE", {"lazy": true}], "image2": ["IMAGE", {"lazy": true}], "image3": ["IMAGE", {"lazy": true}], "image4": ["IMAGE", {"lazy": true}], "image5": ["IMAGE", {"lazy": true}], "image6": ["IMAGE", {"lazy": true}], "image7": ["IMAGE", {"lazy": true}], "image8": ["IMAGE", {"lazy": true}], "image9": ["IMAGE", {"lazy": true}], "image10": ["IMAGE", {"lazy": true}], "image11": ["IMAGE", {"lazy": true}], "image12": ["IMAGE", {"lazy": true}], "image13": ["IMAGE", {"lazy": true}], "image14": ["IMAGE", {"lazy": true}], "image15": ["IMAGE", {"lazy": true}], "image16": ["IMAGE", {"lazy": true}], "image17": ["IMAGE", {"lazy": true}], "image18": ["IMAGE", {"lazy": true}], "image19": ["IMAGE", {"lazy": true}]}}, "input_order": {"required": ["index"], "optional": ["image0", "image1", "image2", "image3", "image4", "image5", "image6", "image7", "image8", "image9", "image10", "image11", "image12", "image13", "image14", "image15", "image16", "image17", "image18", "image19"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy imageIndexSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Index Switch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy textIndexSwitch": {"input": {"required": {"index": ["INT", {"default": 0, "min": 0, "max": 9, "step": 1}]}, "optional": 
{"text0": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text1": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text2": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text3": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text4": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text5": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text6": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text7": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text8": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text9": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text10": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text11": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text12": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text13": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text14": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text15": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text16": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text17": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text18": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}], "text19": ["STRING", {"lazy": true, "forceInput": true, "multiline": false}]}}, "input_order": {"required": ["index"], "optional": ["text0", "text1", "text2", "text3", "text4", "text5", "text6", "text7", "text8", "text9", "text10", "text11", "text12", "text13", "text14", "text15", "text16", "text17", "text18", "text19"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["text"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy textIndexSwitch", "display_name": 
null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Index Switch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy conditioningIndexSwitch": {"input": {"required": {"index": ["INT", {"default": 0, "min": 0, "max": 9, "step": 1}]}, "optional": {"cond0": ["CONDITIONING", {"lazy": true}], "cond1": ["CONDITIONING", {"lazy": true}], "cond2": ["CONDITIONING", {"lazy": true}], "cond3": ["CONDITIONING", {"lazy": true}], "cond4": ["CONDITIONING", {"lazy": true}], "cond5": ["CONDITIONING", {"lazy": true}], "cond6": ["CONDITIONING", {"lazy": true}], "cond7": ["CONDITIONING", {"lazy": true}], "cond8": ["CONDITIONING", {"lazy": true}], "cond9": ["CONDITIONING", {"lazy": true}], "cond10": ["CONDITIONING", {"lazy": true}], "cond11": ["CONDITIONING", {"lazy": true}], "cond12": ["CONDITIONING", {"lazy": true}], "cond13": ["CONDITIONING", {"lazy": true}], "cond14": ["CONDITIONING", {"lazy": true}], "cond15": ["CONDITIONING", {"lazy": true}], "cond16": ["CONDITIONING", {"lazy": true}], "cond17": ["CONDITIONING", {"lazy": true}], "cond18": ["CONDITIONING", {"lazy": true}], "cond19": ["CONDITIONING", {"lazy": true}]}}, "input_order": {"required": ["index"], "optional": ["cond0", "cond1", "cond2", "cond3", "cond4", "cond5", "cond6", "cond7", "cond8", "cond9", "cond10", "cond11", "cond12", "cond13", "cond14", "cond15", "cond16", "cond17", "cond18", "cond19"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["conditioning"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy conditioningIndexSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Index Switch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": 
false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy anythingIndexSwitch": {"input": {"required": {"index": ["INT", {"default": 0, "min": 0, "max": 9, "step": 1}]}, "optional": {"value0": ["*", {"lazy": true}], "value1": ["*", {"lazy": true}], "value2": ["*", {"lazy": true}], "value3": ["*", {"lazy": true}], "value4": ["*", {"lazy": true}], "value5": ["*", {"lazy": true}], "value6": ["*", {"lazy": true}], "value7": ["*", {"lazy": true}], "value8": ["*", {"lazy": true}], "value9": ["*", {"lazy": true}], "value10": ["*", {"lazy": true}], "value11": ["*", {"lazy": true}], "value12": ["*", {"lazy": true}], "value13": ["*", {"lazy": true}], "value14": ["*", {"lazy": true}], "value15": ["*", {"lazy": true}], "value16": ["*", {"lazy": true}], "value17": ["*", {"lazy": true}], "value18": ["*", {"lazy": true}], "value19": ["*", {"lazy": true}]}}, "input_order": {"required": ["index"], "optional": ["value0", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["value"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy anythingIndexSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/Index Switch", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy ab": {"input": {"required": {"A or B": ["BOOLEAN", {"default": true, "label_on": "A", "label_off": "B"}], "in": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["A or B", "in"], "hidden": ["unique_id"]}, "is_input_list": false, 
"output": ["*", "*"], "output_is_list": [false, false], "output_name": ["A", "B"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy ab", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy anythingInversedSwitch": {"input": {"required": {"index": ["INT", {"default": 0, "min": 0, "max": 9, "step": 1}], "in": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["index", "in"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7", "out8", "out9", "out10", "out11", "out12", "out13", "out14", "out15", "out16", "out17", "out18", "out19"], "output_tooltips": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "output_matchtypes": null, "name": "easy anythingInversedSwitch", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy whileLoopStart": {"input": {"required": {"condition": ["BOOLEAN", {"default": true}]}, "optional": {"initial_value0": ["*"], "initial_value1": ["*"], "initial_value2": ["*"], "initial_value3": ["*"], "initial_value4": ["*"], 
"initial_value5": ["*"], "initial_value6": ["*"], "initial_value7": ["*"], "initial_value8": ["*"], "initial_value9": ["*"], "initial_value10": ["*"], "initial_value11": ["*"], "initial_value12": ["*"], "initial_value13": ["*"], "initial_value14": ["*"], "initial_value15": ["*"], "initial_value16": ["*"], "initial_value17": ["*"], "initial_value18": ["*"], "initial_value19": ["*"]}}, "input_order": {"required": ["condition"], "optional": ["initial_value0", "initial_value1", "initial_value2", "initial_value3", "initial_value4", "initial_value5", "initial_value6", "initial_value7", "initial_value8", "initial_value9", "initial_value10", "initial_value11", "initial_value12", "initial_value13", "initial_value14", "initial_value15", "initial_value16", "initial_value17", "initial_value18", "initial_value19"]}, "is_input_list": false, "output": ["FLOW_CONTROL", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["flow", "value0", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"], "name": "easy whileLoopStart", "display_name": "While Loop Start", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/While Loop", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy whileLoopEnd": {"input": {"required": {"flow": ["FLOW_CONTROL", {"rawLink": true}], "condition": ["BOOLEAN", {}]}, "optional": {"initial_value0": ["*"], "initial_value1": ["*"], "initial_value2": ["*"], "initial_value3": ["*"], "initial_value4": ["*"], "initial_value5": ["*"], "initial_value6": ["*"], "initial_value7": ["*"], "initial_value8": ["*"], 
"initial_value9": ["*"], "initial_value10": ["*"], "initial_value11": ["*"], "initial_value12": ["*"], "initial_value13": ["*"], "initial_value14": ["*"], "initial_value15": ["*"], "initial_value16": ["*"], "initial_value17": ["*"], "initial_value18": ["*"], "initial_value19": ["*"]}, "hidden": {"dynprompt": "DYNPROMPT", "unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["flow", "condition"], "optional": ["initial_value0", "initial_value1", "initial_value2", "initial_value3", "initial_value4", "initial_value5", "initial_value6", "initial_value7", "initial_value8", "initial_value9", "initial_value10", "initial_value11", "initial_value12", "initial_value13", "initial_value14", "initial_value15", "initial_value16", "initial_value17", "initial_value18", "initial_value19"], "hidden": ["dynprompt", "unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["value0", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"], "name": "easy whileLoopEnd", "display_name": "While Loop End", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/While Loop", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy forLoopStart": {"input": {"required": {"total": ["INT", {"default": 1, "min": 1, "max": 100000, "step": 1}]}, "optional": {"initial_value1": ["*"], "initial_value2": ["*"], "initial_value3": ["*"], "initial_value4": ["*"], "initial_value5": ["*"], "initial_value6": ["*"], "initial_value7": ["*"], "initial_value8": ["*"], "initial_value9": 
["*"], "initial_value10": ["*"], "initial_value11": ["*"], "initial_value12": ["*"], "initial_value13": ["*"], "initial_value14": ["*"], "initial_value15": ["*"], "initial_value16": ["*"], "initial_value17": ["*"], "initial_value18": ["*"], "initial_value19": ["*"]}, "hidden": {"initial_value0": ["*"], "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["total"], "optional": ["initial_value1", "initial_value2", "initial_value3", "initial_value4", "initial_value5", "initial_value6", "initial_value7", "initial_value8", "initial_value9", "initial_value10", "initial_value11", "initial_value12", "initial_value13", "initial_value14", "initial_value15", "initial_value16", "initial_value17", "initial_value18", "initial_value19"], "hidden": ["initial_value0", "prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["FLOW_CONTROL", "INT", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["flow", "index", "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"], "name": "easy forLoopStart", "display_name": "For Loop Start", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/For Loop", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy forLoopEnd": {"input": {"required": {"flow": ["FLOW_CONTROL", {"rawLink": true}]}, "optional": {"initial_value1": ["*", {"rawLink": true}], "initial_value2": ["*", {"rawLink": true}], "initial_value3": ["*", {"rawLink": true}], "initial_value4": ["*", {"rawLink": true}], "initial_value5": ["*", {"rawLink": true}], 
"initial_value6": ["*", {"rawLink": true}], "initial_value7": ["*", {"rawLink": true}], "initial_value8": ["*", {"rawLink": true}], "initial_value9": ["*", {"rawLink": true}], "initial_value10": ["*", {"rawLink": true}], "initial_value11": ["*", {"rawLink": true}], "initial_value12": ["*", {"rawLink": true}], "initial_value13": ["*", {"rawLink": true}], "initial_value14": ["*", {"rawLink": true}], "initial_value15": ["*", {"rawLink": true}], "initial_value16": ["*", {"rawLink": true}], "initial_value17": ["*", {"rawLink": true}], "initial_value18": ["*", {"rawLink": true}], "initial_value19": ["*", {"rawLink": true}]}, "hidden": {"dynprompt": "DYNPROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["flow"], "optional": ["initial_value1", "initial_value2", "initial_value3", "initial_value4", "initial_value5", "initial_value6", "initial_value7", "initial_value8", "initial_value9", "initial_value10", "initial_value11", "initial_value12", "initial_value13", "initial_value14", "initial_value15", "initial_value16", "initial_value17", "initial_value18", "initial_value19"], "hidden": ["dynprompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10", "value11", "value12", "value13", "value14", "value15", "value16", "value17", "value18", "value19"], "name": "easy forLoopEnd", "display_name": "For Loop End", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic/For Loop", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy blocker": {"input": {"required": {"continue": ["BOOLEAN", 
{"default": false}], "in": ["*", {}]}}, "input_order": {"required": ["continue", "in"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["out"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy blocker", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy ifElse": {"input": {"required": {"boolean": ["BOOLEAN", {}], "on_true": ["*", {"lazy": true}], "on_false": ["*", {"lazy": true}]}}, "input_order": {"required": ["boolean", "on_true", "on_false"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy ifElse", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy isMaskEmpty": {"input": {"required": {"mask": ["MASK", {}]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy isMaskEmpty", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy isNone": {"input": {"required": {"any": ["*", {}]}}, "input_order": 
{"required": ["any"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy isNone", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy isSDXL": {"input": {"required": {}, "optional": {"optional_pipe": ["PIPE_LINE", {}], "optional_clip": ["CLIP", {}]}}, "input_order": {"required": [], "optional": ["optional_pipe", "optional_clip"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy isSDXL", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy isFileExist": {"input": {"required": {"file_path": ["STRING", {"default": "", "multiline": false}], "file_name": ["STRING", {"default": "", "multiline": false}], "file_extension": ["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": ["file_path", "file_name", "file_extension"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["boolean"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy isFileExist", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, 
"search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy stringToIntList": {"input": {"required": {"string": ["STRING", {"default": "1, 2, 3", "multiline": true}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy stringToIntList", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy stringToFloatList": {"input": {"required": {"string": ["STRING", {"default": "1, 2, 3", "multiline": true}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy stringToFloatList", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy stringJoinLines": {"input": {"required": {"string": ["STRING", {"default": "", "multiline": true}], "delimiter": ["STRING", {"default": " | ", "multiline": false}]}}, "input_order": {"required": ["string", "delimiter"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy stringJoinLines", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, 
"deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy outputToList": {"input": {"required": {"tuple": ["*", {}]}}, "input_order": {"required": ["tuple"]}, "is_input_list": false, "output": ["*"], "output_is_list": [true], "output_name": ["list"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy outputToList", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy pixels": {"input": {"required": {"resolution": ["COMBO", {"multiselect": false, "options": ["width x height (custom)", "512 x 512", "512 x 768", "576 x 1024", "768 x 512", "768 x 768", "768 x 1024", "768 x 1280", "768 x 1344", "768 x 1536", "816 x 1920", "832 x 1152", "832 x 1216", "896 x 1152", "896 x 1088", "1024 x 1024", "1024 x 576", "1024 x 768", "1080 x 1920", "1440 x 2560", "1088 x 896", "1216 x 832", "1152 x 832", "1152 x 896", "1280 x 768", "1344 x 768", "1536 x 640", "1536 x 768", "1920 x 816", "1920 x 1080", "2560 x 1440"]}], "width": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "height": ["INT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "scale": ["FLOAT", {"default": 2.0, "min": 0.001, "max": 10, "step": 0.001}], "flip_w/h": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["resolution", "width", "height", "scale", "flip_w/h"]}, "is_input_list": false, "output": ["INT", "INT", "*", "*", "*"], "output_is_list": [false, false, false, false, false], "output_name": ["width_norm", "height_norm", "width", "height", "scale_factor"], "output_tooltips": [null, null, null, null, null], "output_matchtypes": null, "name": 
"easy pixels", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy xyAny": {"input": {"required": {"X": ["*", {}], "Y": ["*", {}], "direction": ["COMBO", {"default": "horizontal", "multiselect": false, "options": ["horizontal", "vertical"]}]}}, "input_order": {"required": ["X", "Y", "direction"]}, "is_input_list": true, "output": ["*", "*"], "output_is_list": [true, true], "output_name": ["X", "Y"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy xyAny", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy lengthAnything": {"input": {"required": {"any": ["*", {}]}, "hidden": {"prompt": ["PROMPT"], "unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["any"], "hidden": ["prompt", "unique_id"]}, "is_input_list": true, "output": ["INT"], "output_is_list": [false], "output_name": ["length"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy lengthAnything", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy indexAnything": {"input": {"required": {"any": ["*", {}], "index": ["INT", {"default": 0, "min": -1000000, "max": 1000000, "step": 1}]}, "hidden": {"prompt": ["PROMPT"], 
"unique_id": ["UNIQUE_ID"]}}, "input_order": {"required": ["any", "index"], "hidden": ["prompt", "unique_id"]}, "is_input_list": true, "output": ["*"], "output_is_list": [false], "output_name": ["out"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy indexAnything", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy batchAnything": {"input": {"required": {"any_1": ["*", {}], "any_2": ["*", {}]}}, "input_order": {"required": ["any_1", "any_2"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["batch"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy batchAnything", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy convertAnything": {"input": {"required": {"*": ["*", {}], "output_type": ["COMBO", {"default": "string", "multiselect": false, "options": ["string", "int", "float", "boolean"]}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["*", "output_type"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy convertAnything", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, 
"api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy showAnything": {"input": {"required": {}, "optional": {"anything": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": [], "optional": ["anything"], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": true, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy showAnything", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy showTensorShape": {"input": {"required": {"tensor": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": ["tensor"], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "output_tooltips": [], "output_matchtypes": null, "name": "easy showTensorShape", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy clearCacheKey": {"input": {"required": {"anything": ["*", {}], "cache_key": ["STRING", {"default": "*", "multiline": false}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": ["anything", "cache_key"], "hidden": ["unique_id", 
"extra_pnginfo", "prompt"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy clearCacheKey", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy clearCacheAll": {"input": {"required": {"anything": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": ["anything"], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy clearCacheAll", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy cleanGpuUsed": {"input": {"required": {"anything": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": ["anything"], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy cleanGpuUsed", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, 
"search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy saveText": {"input": {"required": {"text": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "output_file_path": ["STRING", {"default": "", "multiline": false}], "file_name": ["STRING", {"default": "", "multiline": false}], "file_extension": ["COMBO", {"multiselect": false, "options": ["txt", "csv"]}], "overwrite": ["BOOLEAN", {"default": true}]}, "optional": {"image": ["IMAGE", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["text", "output_file_path", "file_name", "file_extension", "overwrite"], "optional": ["image"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "IMAGE"], "output_is_list": [false, false], "output_name": ["text", "image"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy saveText", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy sleep": {"input": {"required": {"any": ["*", {}], "delay": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1000000, "step": 0.1}]}}, "input_order": {"required": ["any", "delay"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["out"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy sleep", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": false, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy joyCaption2API": {"input": 
{"required": {"image": ["IMAGE"], "do_sample": [[true, false]], "temperature": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 2.0, "step": 0.01, "round": 0.001, "display": "number"}], "max_tokens": ["INT", {"default": 256, "min": 16, "max": 512, "step": 16, "display": "number"}], "caption_type": [["Descriptive", "Descriptive (Informal)", "Training Prompt", "MidJourney", "Booru tag list", "Booru-like tag list", "Art Critic", "Product Listing", "Social Media Post"]], "caption_length": [["any", "very short", "short", "medium-length", "long", "very long", "20", "30", "40", "50", "60", "70", "80", "90", "100", "110", "120", "130", "140", "150", "160", "170", "180", "190", "200", "210", "220", "230", "240", "250", "260"]], "extra_options": ["STRING", {"placeholder": "Extra options(e.g):\nIf there is a person/character in the image you must refer to them as {name}.", "tooltip": "Extra options for the model", "multiline": true}], "name_input": ["STRING", {"default": "", "tooltip": "Name input is only used if an Extra Option is selected that requires it."}], "custom_prompt": ["STRING", {"default": "", "multiline": true}]}, "optional": {"apikey_override": ["STRING", {"default": "", "forceInput": true, "tooltip": "Override the API key in the local config"}]}}, "input_order": {"required": ["image", "do_sample", "temperature", "max_tokens", "caption_type", "caption_length", "extra_options", "name_input", "custom_prompt"], "optional": ["apikey_override"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["caption"], "name": "easy joyCaption2API", "display_name": "JoyCaption2 (BizyAIR)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/API", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy joyCaption3API": {"input": {"required": {"image": ["IMAGE"], "do_sample": [[true, false]], "temperature": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 2.0, "step": 0.01, "round": 
0.001, "display": "number"}], "max_tokens": ["INT", {"default": 256, "min": 16, "max": 512, "step": 16, "display": "number"}], "caption_type": [["Descriptive", "Descriptive (Informal)", "Training Prompt", "MidJourney", "Booru tag list", "Booru-like tag list", "Art Critic", "Product Listing", "Social Media Post"]], "caption_length": [["any", "very short", "short", "medium-length", "long", "very long", "20", "30", "40", "50", "60", "70", "80", "90", "100", "110", "120", "130", "140", "150", "160", "170", "180", "190", "200", "210", "220", "230", "240", "250", "260"]], "extra_options": ["STRING", {"placeholder": "Extra options(e.g):\nIf there is a person/character in the image you must refer to them as {name}.", "tooltip": "Extra options for the model", "multiline": true}], "name_input": ["STRING", {"default": "", "tooltip": "Name input is only used if an Extra Option is selected that requires it."}], "custom_prompt": ["STRING", {"default": "", "multiline": true}]}, "optional": {"apikey_override": ["STRING", {"default": "", "forceInput": true, "tooltip": "Override the API key in the local config"}]}}, "input_order": {"required": ["image", "do_sample", "temperature", "max_tokens", "caption_type", "caption_length", "extra_options", "name_input", "custom_prompt"], "optional": ["apikey_override"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["caption"], "name": "easy joyCaption3API", "display_name": "JoyCaption3 (BizyAIR)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/API", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "easy if": {"input": {"required": {"any": ["*"], "if": ["*"], "else": ["*"]}}, "input_order": {"required": ["any", "if", "else"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["?"], "name": "easy if", "display_name": "If (\ud83d\udeabDeprecated)", "description": "", "python_module": 
"custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy poseEditor": {"input": {"required": {"image": ["STRING", {"default": ""}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "easy poseEditor", "display_name": "PoseEditor (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy imageToMask": {"input": {"required": {"image": ["IMAGE"], "channel": [["red", "green", "blue"]]}}, "input_order": {"required": ["image", "channel"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "easy imageToMask", "display_name": "ImageToMask (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy showSpentTime": {"input": {"required": {"pipe": ["PIPE_LINE"]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["pipe"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "easy showSpentTime", "display_name": "Show Spent Time (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": true, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy latentNoisy": {"input": {"required": {"sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", 
"exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "steps": ["INT", {"default": 10000, "min": 0, "max": 10000}], "start_at_step": ["INT", {"default": 0, "min": 0, "max": 10000}], "end_at_step": ["INT", {"default": 10000, "min": 1, "max": 10000}], "source": [["CPU", "GPU"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}]}, "optional": {"pipe": ["PIPE_LINE"], "optional_model": ["MODEL"], "optional_latent": ["LATENT"]}}, "input_order": {"required": ["sampler_name", "scheduler", "steps", "start_at_step", "end_at_step", "source", "seed"], "optional": ["pipe", "optional_model", "optional_latent"]}, "is_input_list": false, "output": ["PIPE_LINE", "LATENT", "FLOAT"], "output_is_list": [false, false, false], "output_name": ["pipe", "latent", "sigma"], "name": "easy latentNoisy", "display_name": "LatentNoisy (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy latentCompositeMaskedWithCond": {"input": {"required": {"pipe": ["PIPE_LINE"], "text_combine": ["LIST"], "source_latent": ["LATENT"], "source_mask": ["MASK"], "destination_mask": ["MASK"], 
"text_combine_mode": [["add", "replace", "cover"], {"default": "add"}], "replace_text": ["STRING", {"default": ""}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pipe", "text_combine", "source_latent", "source_mask", "destination_mask", "text_combine_mode", "replace_text"], "hidden": ["prompt", "extra_pnginfo", "my_unique_id"]}, "is_input_list": false, "output": ["PIPE_LINE", "LATENT", "CONDITIONING"], "output_is_list": [false, false, true], "output_name": ["pipe", "latent", "conditioning"], "name": "easy latentCompositeMaskedWithCond", "display_name": "LatentCompositeMaskedWithCond (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy injectNoiseToLatent": {"input": {"required": {"strength": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}], "normalize": ["BOOLEAN", {"default": false}], "average": ["BOOLEAN", {"default": false}]}, "optional": {"pipe_to_noise": ["PIPE_LINE"], "image_to_latent": ["IMAGE"], "latent": ["LATENT"], "noise": ["LATENT"], "mask": ["MASK"], "mix_randn_amount": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}]}}, "input_order": {"required": ["strength", "normalize", "average"], "optional": ["pipe_to_noise", "image_to_latent", "latent", "noise", "mask", "mix_randn_amount", "seed"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "easy injectNoiseToLatent", "display_name": "InjectNoiseToLatent (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, 
"deprecated": true, "search_aliases": []}, "easy stableDiffusion3API": {"input": {"required": {"positive": ["STRING", {"default": "", "placeholder": "Positive", "multiline": true}], "negative": ["STRING", {"default": "", "placeholder": "Negative", "multiline": true}], "model": [["sd3", "sd3-turbo"]], "aspect_ratio": [["16:9", "1:1", "21:9", "2:3", "3:2", "4:5", "5:4", "9:16", "9:21"]], "seed": ["INT", {"default": 0, "min": 0, "max": 4294967294}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}]}, "optional": {"optional_image": ["IMAGE"]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["positive", "negative", "model", "aspect_ratio", "seed", "denoise"], "optional": ["optional_image"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "easy stableDiffusion3API", "display_name": "StableDiffusion3API (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "easy saveImageLazy": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "save_metadata": ["BOOLEAN", {"default": true}]}, "optional": {}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "save_metadata"], "optional": [], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "easy saveImageLazy", "display_name": "SaveImageLazy (\ud83d\udeabDeprecated)", "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/\ud83d\udeab Deprecated", "output_node": false, "has_intermediate_output": false, "deprecated": true, 
"search_aliases": []}, "easy saveTextLazy": {"input": {"required": {"text": ["STRING", {"default": "", "forceInput": true, "multiline": false}], "output_file_path": ["STRING", {"default": "", "multiline": false}], "file_name": ["STRING", {"default": "", "multiline": false}], "file_extension": ["COMBO", {"multiselect": false, "options": ["txt", "csv"]}], "overwrite": ["BOOLEAN", {"default": true}]}, "optional": {"image": ["IMAGE", {}]}, "hidden": {"prompt": ["PROMPT"], "extra_pnginfo": ["EXTRA_PNGINFO"]}}, "input_order": {"required": ["text", "output_file_path", "file_name", "file_extension", "overwrite"], "optional": ["image"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING", "IMAGE"], "output_is_list": [false, false], "output_name": ["text", "image"], "output_tooltips": [null, null], "output_matchtypes": null, "name": "easy saveTextLazy", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, "has_intermediate_output": false}, "easy showAnythingLazy": {"input": {"required": {}, "optional": {"anything": ["*", {}]}, "hidden": {"unique_id": ["UNIQUE_ID"], "extra_pnginfo": ["EXTRA_PNGINFO"], "prompt": ["PROMPT"]}}, "input_order": {"required": [], "optional": ["anything"], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": true, "output": ["*"], "output_is_list": [false], "output_name": ["output"], "output_tooltips": [null], "output_matchtypes": null, "name": "easy showAnythingLazy", "display_name": null, "description": "", "python_module": "custom_nodes.ComfyUI-Easy-Use", "category": "EasyUse/Logic", "output_node": true, "deprecated": false, "experimental": false, "dev_only": false, "api_node": false, "price_badge": null, "search_aliases": null, "essentials_category": null, 
"has_intermediate_output": false}, "DownloadAndLoadFlorence2Model": {"input": {"required": {"model": [["microsoft/Florence-2-base", "microsoft/Florence-2-base-ft", "microsoft/Florence-2-large", "microsoft/Florence-2-large-ft", "HuggingFaceM4/Florence-2-DocVQA", "thwri/CogFlorence-2.1-Large", "thwri/CogFlorence-2.2-Large", "gokaygokay/Florence-2-SD3-Captioner", "gokaygokay/Florence-2-Flux-Large", "MiaoshouAI/Florence-2-base-PromptGen-v1.5", "MiaoshouAI/Florence-2-large-PromptGen-v1.5", "MiaoshouAI/Florence-2-base-PromptGen-v2.0", "MiaoshouAI/Florence-2-large-PromptGen-v2.0", "PJMixers-Images/Florence-2-base-Castollux-v0.5"], {"default": "microsoft/Florence-2-base"}], "precision": [["fp16", "bf16", "fp32"], {"default": "fp16"}], "attention": [["flash_attention_2", "sdpa", "eager"], {"default": "sdpa"}]}, "optional": {"lora": ["PEFTLORA"], "convert_to_safetensors": ["BOOLEAN", {"default": false, "tooltip": "Some of the older model weights are not saved in .safetensors format, which seem to cause longer loading times, this option converts the .bin weights to .safetensors"}]}}, "input_order": {"required": ["model", "precision", "attention"], "optional": ["lora", "convert_to_safetensors"]}, "is_input_list": false, "output": ["FL2MODEL"], "output_is_list": [false], "output_name": ["florence2_model"], "name": "DownloadAndLoadFlorence2Model", "display_name": "DownloadAndLoadFlorence2Model", "description": "", "python_module": "custom_nodes.ComfyUI-Florence2", "category": "Florence2", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DownloadAndLoadFlorence2Lora": {"input": {"required": {"model": [["NikshepShetty/Florence-2-pixelprose"]]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["PEFTLORA"], "output_is_list": [false], "output_name": ["lora"], "name": "DownloadAndLoadFlorence2Lora", "display_name": "DownloadAndLoadFlorence2Lora", "description": "", "python_module": "custom_nodes.ComfyUI-Florence2", "category": 
"Florence2", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Florence2ModelLoader": {"input": {"required": {"model": [["Florence-2-Flux-Large", "Florence-2-SD3-Captioner", "Florence-2-base", "Florence-2-base-PromptGen-v2.0", "Florence-2-base-ft", "Florence-2-large", "Florence-2-large-PromptGen-v1.5", "Florence-2-large-PromptGen-v2.0", "Florence-2-large-ft", "Florence-2-pixelpros", "Llama-3.2-3B-Instruct", "Meta-Llama-3.1-8B-Instruct-bnb-4bit", "OmniGen-v1", "checkpoints", "llava-llama-3-8b-text-encoder-tokenizer", "llava-llama-3-8b-v1_1-transformers"], {"tooltip": "models are expected to be in Comfyui/models/LLM folder"}], "precision": [["fp16", "bf16", "fp32"]], "attention": [["flash_attention_2", "sdpa", "eager"], {"default": "sdpa"}]}, "optional": {"lora": ["PEFTLORA"], "convert_to_safetensors": ["BOOLEAN", {"default": false, "tooltip": "Some of the older model weights are not saved in .safetensors format, which seem to cause longer loading times, this option converts the .bin weights to .safetensors"}]}}, "input_order": {"required": ["model", "precision", "attention"], "optional": ["lora", "convert_to_safetensors"]}, "is_input_list": false, "output": ["FL2MODEL"], "output_is_list": [false], "output_name": ["florence2_model"], "name": "Florence2ModelLoader", "display_name": "Florence2ModelLoader", "description": "", "python_module": "custom_nodes.ComfyUI-Florence2", "category": "Florence2", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Florence2Run": {"input": {"required": {"image": ["IMAGE"], "florence2_model": ["FL2MODEL"], "text_input": ["STRING", {"default": "", "multiline": true}], "task": [["region_caption", "dense_region_caption", "region_proposal", "caption", "detailed_caption", "more_detailed_caption", "caption_to_phrase_grounding", "referring_expression_segmentation", "ocr", "ocr_with_region", "docvqa", "prompt_gen_tags", "prompt_gen_mixed_caption", "prompt_gen_analyze", 
"prompt_gen_mixed_caption_plus"]], "fill_mask": ["BOOLEAN", {"default": true}]}, "optional": {"keep_model_loaded": ["BOOLEAN", {"default": false}], "max_new_tokens": ["INT", {"default": 1024, "min": 1, "max": 4096}], "num_beams": ["INT", {"default": 3, "min": 1, "max": 64}], "do_sample": ["BOOLEAN", {"default": true}], "output_mask_select": ["STRING", {"default": ""}], "seed": ["INT", {"default": 1, "min": 1, "max": 18446744073709551615}]}}, "input_order": {"required": ["image", "florence2_model", "text_input", "task", "fill_mask"], "optional": ["keep_model_loaded", "max_new_tokens", "num_beams", "do_sample", "output_mask_select", "seed"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "STRING", "JSON"], "output_is_list": [false, false, false, false], "output_name": ["image", "mask", "caption", "data"], "name": "Florence2Run", "display_name": "Florence2Run", "description": "", "python_module": "custom_nodes.ComfyUI-Florence2", "category": "Florence2", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "UltralyticsDetectorProvider": {"input": {"required": {"model_name": [["bbox/Eyeful_v2-Paired.pt", "bbox/Eyes.pt", "bbox/face_yolov8m.pt", "bbox/face_yolov8n.pt", "bbox/face_yolov8n_v2.pt", "bbox/face_yolov8s.pt", "bbox/hand_yolov8n.pt", "bbox/hand_yolov8s.pt", "bbox/lips_v1.pt", "bbox/yolov8s.pt", "segm/deepfashion2_yolov8s-seg.pt", "segm/face_yolov8m-seg_60.pt", "segm/face_yolov8n-seg2_60.pt", "segm/facial_features_yolo8x-seg.pt", "segm/flowers_seg_yolov8model.pt", "segm/hair_yolov8n-seg_60.pt", "segm/person_yolov8m-seg.pt", "segm/person_yolov8n-seg.pt", "segm/person_yolov8s-seg.pt", "segm/skin_yolov8m-seg_400.pt", "segm/skin_yolov8n-seg_400.pt", "segm/skin_yolov8n-seg_800.pt", "segm/yolov8_butterfly_custom.pt", "segm/yolov8l-seg.pt", "segm/yolov8m-seg.pt", "segm/yolov8n-seg.pt", "segm/yolov8s-seg.pt", "segm/yolov8x-seg.pt"]]}}, "input_order": {"required": ["model_name"]}, "is_input_list": false, "output": ["BBOX_DETECTOR", 
"SEGM_DETECTOR"], "output_is_list": [false, false], "output_name": ["BBOX_DETECTOR", "SEGM_DETECTOR"], "name": "UltralyticsDetectorProvider", "display_name": "UltralyticsDetectorProvider", "description": "", "python_module": "custom_nodes.ComfyUI-Impact-Subpack", "category": "ImpactPack", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LABColorTransfer": {"input": {"required": {"target": ["IMAGE"], "reference": ["IMAGE"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "preserve_l": ["BOOLEAN", {"default": true}], "transfer_a": ["BOOLEAN", {"default": true}], "transfer_b": ["BOOLEAN", {"default": true}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["target", "reference", "strength", "preserve_l", "transfer_a", "transfer_b"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "LABColorTransfer", "display_name": "LABColorTransfer", "description": "", "python_module": "custom_nodes.ComfyUI-LABColorTransfer", "category": "image/color", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BiRefNetRMBG": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Input image to be processed for background removal."}], "model": [["BiRefNet-general", "BiRefNet_512x512", "BiRefNet-HR", "BiRefNet-portrait", "BiRefNet-matting", "BiRefNet-HR-matting", "BiRefNet_lite", "BiRefNet_lite-2K", "BiRefNet_dynamic", "BiRefNet_lite-matting", "BiRefNet_toonout"], {"tooltip": "Select the BiRefNet model variant to use."}]}, "optional": {"mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Specify the amount of blur to apply to the mask edges (0 for no blur, higher values for more blur)."}], "mask_offset": ["INT", {"default": 0, "min": -20, "max": 20, "step": 1, "tooltip": "Adjust the mask boundary (positive values expand the mask, negative values shrink it)."}], "invert_output": 
["BOOLEAN", {"default": false, "tooltip": "Enable to invert both the image and mask output (useful for certain effects)."}], "refine_foreground": ["BOOLEAN", {"default": false, "tooltip": "Use Fast Foreground Colour Estimation to optimize transparent background"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["image", "model"], "optional": ["mask_blur", "mask_offset", "invert_output", "refine_foreground", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "BiRefNetRMBG", "display_name": "BiRefNet (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BodySegment": {"input": {"required": {"images": ["IMAGE"]}, "optional": {"Hair": ["BOOLEAN", {"default": false}], "Glasses": ["BOOLEAN", {"default": false}], "Top-clothes": ["BOOLEAN", {"default": false}], "Bottom-clothes": ["BOOLEAN", {"default": false}], "Torso-skin": ["BOOLEAN", {"default": false}], "Face": ["BOOLEAN", {"default": false}], "Left-arm": ["BOOLEAN", {"default": false}], "Right-arm": ["BOOLEAN", {"default": false}], "Left-leg": ["BOOLEAN", {"default": false}], "Right-leg": ["BOOLEAN", {"default": false}], "Left-foot": ["BOOLEAN", {"default": false}], "Right-foot": ["BOOLEAN", {"default": false}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur amount for mask edges"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/Shrink mask boundary"}], "invert_output": ["BOOLEAN", {"default": 
false, "tooltip": "Invert both image and mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["images"], "optional": ["Hair", "Glasses", "Top-clothes", "Bottom-clothes", "Torso-skin", "Face", "Left-arm", "Right-arm", "Left-leg", "Right-leg", "Left-foot", "Right-foot", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "BodySegment", "display_name": "Body Segment (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ClothesSegment": {"input": {"required": {"images": ["IMAGE"]}, "optional": {"Hat": ["BOOLEAN", {"default": false}], "Hair": ["BOOLEAN", {"default": false}], "Face": ["BOOLEAN", {"default": false}], "Sunglasses": ["BOOLEAN", {"default": false}], "Upper-clothes": ["BOOLEAN", {"default": false}], "Skirt": ["BOOLEAN", {"default": false}], "Dress": ["BOOLEAN", {"default": false}], "Belt": ["BOOLEAN", {"default": false}], "Pants": ["BOOLEAN", {"default": false}], "Left-arm": ["BOOLEAN", {"default": false}], "Right-arm": ["BOOLEAN", {"default": false}], "Left-leg": ["BOOLEAN", {"default": false}], "Right-leg": ["BOOLEAN", {"default": false}], "Bag": ["BOOLEAN", {"default": false}], "Scarf": ["BOOLEAN", {"default": false}], "Left-shoe": ["BOOLEAN", {"default": false}], "Right-shoe": ["BOOLEAN", {"default": false}], "Background": ["BOOLEAN", {"default": false}], "process_res": ["INT", {"default": 512, "min": 128, "max": 2048, "step": 32, "tooltip": "Processing resolution 
(higher = more VRAM)"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur amount for mask edges"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/Shrink mask boundary"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert both image and mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["images"], "optional": ["Hat", "Hair", "Face", "Sunglasses", "Upper-clothes", "Skirt", "Dress", "Belt", "Pants", "Left-arm", "Right-arm", "Left-leg", "Right-leg", "Bag", "Scarf", "Left-shoe", "Right-shoe", "Background", "process_res", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "ClothesSegment", "display_name": "Clothes Segment (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ColorToMask": {"input": {"required": {"images": ["IMAGE"], "invert": ["BOOLEAN", {"default": false}], "threshold": ["INT", {"default": 10, "min": 0, "max": 255, "step": 1}], "mask_color": ["COLORCODE", {"default": "#FFFFFF", "tooltip": "Mask color (hex)"}]}}, "input_order": {"required": ["images", "invert", "threshold", "mask_color"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "AILab_ColorToMask", "display_name": "Color to Mask (RMBG) \ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": 
"\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FaceSegment": {"input": {"required": {"images": ["IMAGE"]}, "optional": {"Skin": ["BOOLEAN", {"default": false}], "Nose": ["BOOLEAN", {"default": false}], "Eyeglasses": ["BOOLEAN", {"default": false}], "Left-eye": ["BOOLEAN", {"default": false}], "Right-eye": ["BOOLEAN", {"default": false}], "Left-eyebrow": ["BOOLEAN", {"default": false}], "Right-eyebrow": ["BOOLEAN", {"default": false}], "Left-ear": ["BOOLEAN", {"default": false}], "Right-ear": ["BOOLEAN", {"default": false}], "Mouth": ["BOOLEAN", {"default": false}], "Upper-lip": ["BOOLEAN", {"default": false}], "Lower-lip": ["BOOLEAN", {"default": false}], "Hair": ["BOOLEAN", {"default": false}], "Earring": ["BOOLEAN", {"default": false}], "Neck": ["BOOLEAN", {"default": false}], "process_res": ["INT", {"default": 512, "min": 128, "max": 2048, "step": 32, "tooltip": "Processing resolution (higher = more VRAM)"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur amount for mask edges"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/Shrink mask boundary"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert both image and mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["images"], "optional": ["Skin", "Nose", "Eyeglasses", "Left-eye", "Right-eye", "Left-eyebrow", "Right-eyebrow", "Left-ear", "Right-ear", "Mouth", "Upper-lip", "Lower-lip", "Hair", "Earring", "Neck", "process_res", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": 
[false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "FaceSegment", "display_name": "Face Segment (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FashionSegmentAccessories": {"input": {"required": {}, "optional": {"hat": ["BOOLEAN", {"default": false}], "glasses": ["BOOLEAN", {"default": false}], "headband, head covering, hair accessory": ["BOOLEAN", {"default": false}], "scarf": ["BOOLEAN", {"default": false}], "tie": ["BOOLEAN", {"default": false}], "glove": ["BOOLEAN", {"default": false}], "watch": ["BOOLEAN", {"default": false}], "belt": ["BOOLEAN", {"default": false}], "leg warmer": ["BOOLEAN", {"default": false}], "bag, wallet": ["BOOLEAN", {"default": false}], "umbrella": ["BOOLEAN", {"default": false}], "collar": ["BOOLEAN", {"default": false}], "lapel": ["BOOLEAN", {"default": false}], "neckline": ["BOOLEAN", {"default": false}], "epaulette": ["BOOLEAN", {"default": false}], "pocket": ["BOOLEAN", {"default": false}], "buckle": ["BOOLEAN", {"default": false}], "zipper": ["BOOLEAN", {"default": false}], "applique": ["BOOLEAN", {"default": false}], "bow": ["BOOLEAN", {"default": false}], "flower": ["BOOLEAN", {"default": false}], "bead": ["BOOLEAN", {"default": false}], "fringe": ["BOOLEAN", {"default": false}], "ribbon": ["BOOLEAN", {"default": false}], "rivet": ["BOOLEAN", {"default": false}], "ruffle": ["BOOLEAN", {"default": false}], "sequin": ["BOOLEAN", {"default": false}], "tassel": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": [], "optional": ["hat", "glasses", "headband, head covering, hair accessory", "scarf", "tie", "glove", "watch", "belt", "leg warmer", "bag, wallet", "umbrella", "collar", "lapel", "neckline", "epaulette", "pocket", "buckle", "zipper", "applique", "bow", "flower", "bead", "fringe", "ribbon", "rivet", "ruffle", "sequin", "tassel"]}, 
"is_input_list": false, "output": ["ACCESSORIES_OPTIONS"], "output_is_list": [false], "output_name": ["accessories_options"], "name": "FashionSegmentAccessories", "display_name": "Accessories Segment (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FashionSegmentClothing": {"input": {"required": {"images": ["IMAGE"]}, "optional": {"accessories_options": ["ACCESSORIES_OPTIONS"], "coat": ["BOOLEAN", {"default": false}], "jacket": ["BOOLEAN", {"default": false}], "cardigan": ["BOOLEAN", {"default": false}], "vest": ["BOOLEAN", {"default": false}], "sweater": ["BOOLEAN", {"default": false}], "hood": ["BOOLEAN", {"default": false}], "shirt, blouse": ["BOOLEAN", {"default": false}], "top, t-shirt, sweatshirt": ["BOOLEAN", {"default": false}], "sleeve": ["BOOLEAN", {"default": false}], "dress": ["BOOLEAN", {"default": false}], "jumpsuit": ["BOOLEAN", {"default": false}], "cape": ["BOOLEAN", {"default": false}], "pants": ["BOOLEAN", {"default": false}], "shorts": ["BOOLEAN", {"default": false}], "skirt": ["BOOLEAN", {"default": false}], "tights, stockings": ["BOOLEAN", {"default": false}], "sock": ["BOOLEAN", {"default": false}], "shoe": ["BOOLEAN", {"default": false}], "process_res": ["INT", {"default": 512, "min": 128, "max": 2048, "step": 32, "tooltip": "Processing resolution (higher = more VRAM)"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur amount for mask edges"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/Shrink mask boundary"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert both image and mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": 
"#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["images"], "optional": ["accessories_options", "coat", "jacket", "cardigan", "vest", "sweater", "hood", "shirt, blouse", "top, t-shirt, sweatshirt", "sleeve", "dress", "jumpsuit", "cape", "pants", "shorts", "skirt", "tights, stockings", "sock", "shoe", "process_res", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "FashionSegmentClothing", "display_name": "Fashion Segment (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_Florence2": {"input": {"required": {"image": ["IMAGE"], "model_name": [["microsoft/Florence-2-base", "microsoft/Florence-2-base-ft", "microsoft/Florence-2-large", "microsoft/Florence-2-large-ft", "thwri/CogFlorence-2.1-Large", "thwri/CogFlorence-2.2-Large"], {"default": "microsoft/Florence-2-base", "tooltip": "Base = stable, +ft = fine-tuned captions, CogFlorence = sharper phrase alignment."}], "task": [["Polygon Mask (text prompt)", "Phrase Grounding (text boxes)", "Region Proposals (boxes only)"], {"default": "Polygon Mask (text prompt)", "tooltip": "Polygon masks use prompts; phrase grounding/region proposals return boxes."}], "precision": [["fp16", "bf16", "fp32"], {"default": "fp16", "tooltip": "Lower precision saves VRAM; fp32 is safest if you hit NaNs."}], "attention": [["flash_attention_2", "sdpa", "eager"], {"default": "sdpa", "tooltip": "flash_attn2 needs PyTorch 2.1+; use eager if kernels fail."}], "fill_mask": ["BOOLEAN", {"default": true, "tooltip": "When true, bbox tasks also output filled mask tensors."}]}, "optional": {"output_mask_select": ["STRING", {"default": "", "tooltip": 
"Comma-separated indices or labels (e.g. 0,2,person) to limit masks."}], "keep_model_loaded": ["BOOLEAN", {"default": false, "tooltip": "Keep weights on the current device after execution."}], "text_prompt": ["STRING", {"default": "", "multiline": true, "placeholder": "Prompt: e.g. a person wearing red coat", "tooltip": "Used for polygon masks or phrase grounding; ignored for region proposals."}]}}, "input_order": {"required": ["image", "model_name", "task", "precision", "attention", "fill_mask"], "optional": ["output_mask_select", "keep_model_loaded", "text_prompt"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "JSON"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "DATA"], "name": "AILab_Florence2", "display_name": "Florence2 (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_Florence2ToCoordinates": {"input": {"required": {"data": ["JSON", {"tooltip": "Florence2 JSON output (list per image)."}], "index": ["STRING", {"default": "", "tooltip": "Comma-separated indexes; blank = use all boxes from first item."}], "batch": ["BOOLEAN", {"default": false, "tooltip": "If true, gather boxes across the batch."}]}, "optional": {"image": ["IMAGE"]}}, "input_order": {"required": ["data", "index", "batch"], "optional": ["image"]}, "is_input_list": false, "output": ["STRING", "BBOX", "MASK"], "output_is_list": [false, false, false], "output_name": ["CENTER_COORDINATES", "BBOXES", "MASK"], "name": "AILab_Florence2ToCoordinates", "display_name": "Florence2 Box Coordinates (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_LoadImage": {"input": {"required": {"image_path_or_URL": ["STRING", {"default": "", "placeholder": "Local path 
or URL"}], "image": [["", "2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos"}], "megapixels": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 16.0, "step": 0.01}], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}], "resize_mode": [["longest_side", "shortest_side", "width", "height"], {"default": "longest_side"}], "size": ["INT", {"default": 0, "min": 0, "max": 16384}]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image_path_or_URL", "image", "upscale_method", "megapixels", "scale_by", "resize_mode", "size"], "hidden": ["extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "MASK", "WIDTH", "HEIGHT"], "name": "AILab_LoadImage", "display_name": "Load Image (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_LoadImageSimple": {"input": {"required": {"image_path_or_URL": ["STRING", {"default": "", "placeholder": "Local path, network path or URL"}], "image": [["", "2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image_path_or_URL", 
"image"], "hidden": ["extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "MASK", "WIDTH", "HEIGHT"], "name": "AILab_LoadImageSimple", "display_name": "Load Image Basic (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_LoadImageAdvanced": {"input": {"required": {"image_path_or_URL": ["STRING", {"default": "", "placeholder": "Local path or URL"}], "image": [["", "2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}], "mask_channel": [["alpha", "red", "green", "blue"], {"default": "alpha"}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos"}], "megapixels": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 16.0, "step": 0.01}], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}], "resize_mode": [["longest_side", "shortest_side", "width", "height"], {"default": "longest_side"}], "size": ["INT", {"default": 0, "min": 0, "max": 16384}]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image_path_or_URL", "image", "mask_channel", "upscale_method", "megapixels", "scale_by", "resize_mode", "size"], "hidden": ["extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE", "INT", "INT", "STRING"], "output_is_list": [false, false, false, false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE", "WIDTH", "HEIGHT", "METADATA"], "name": "AILab_LoadImageAdvanced", "display_name": "Load Image Advanced (RMBG) 
\ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_LoadImageBatch": {"input": {"required": {"path_or_urls": ["STRING", {"default": "", "multiline": true, "placeholder": "Path to a directory, comma/new-line separated file paths, OR comma/new-line separated URLs"}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos"}], "megapixels": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 16.0, "step": 0.01}], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}], "resize_mode": [["longest_side", "shortest_side", "width", "height"], {"default": "longest_side"}], "size": ["INT", {"default": 0, "min": 0, "max": 16384}]}, "optional": {"batch_size": ["INT", {"default": 0, "min": 0, "step": 1, "tooltip": "Number of images to load (0 = all images)"}], "start_from": ["INT", {"default": 1, "min": 1, "step": 1, "tooltip": "Start from Nth image (1 = first image)"}], "sort_method": [["sequential", "reverse", "random"], {"default": "sequential", "tooltip": "Image loading order: sequential/reverse/random"}]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["path_or_urls", "upscale_method", "megapixels", "scale_by", "resize_mode", "size"], "optional": ["batch_size", "start_from", "sort_method"], "hidden": ["extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT"], "output_is_list": [true, true, true, true], "output_name": ["IMAGE", "MASK", "WIDTH", "HEIGHT"], "name": "AILab_LoadImageBatch", "display_name": "Load Image Batch (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_UnbatchImages": {"input": 
{"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false, false, false, false, false, false], "output_name": ["image_1", "image_2", "image_3", "image_4", "image_5", "image_6", "image_7", "image_8"], "name": "AILab_UnbatchImages", "display_name": "Unbatch Images (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AILab_Preview": {"input": {"optional": {"image": ["IMAGE", {"default": null}], "mask": ["MASK", {"default": null}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"optional": ["image", "mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "AILab_Preview", "display_name": "Image/Mask Preview (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskOverlay": {"input": {"required": {"mask_opacity": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Control mask opacity (0.0-1.0)"}], "mask_color": ["COLORCODE", {"default": "#0000FF", "tooltip": "Color for the mask overlay"}]}, "optional": {"image": ["IMAGE", {"tooltip": "Input image (RGBA will be converted to RGB)"}], "mask": ["MASK", {"tooltip": "Input mask"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["mask_opacity", "mask_color"], "optional": ["image", "mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": 
["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "AILab_MaskOverlay", "display_name": "Mask Overlay (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImagePreview": {"input": {"required": {"image": ["IMAGE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["image"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "AILab_ImagePreview", "display_name": "Image Preview (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskPreview": {"input": {"required": {"mask": ["MASK"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "AILab_MaskPreview", "display_name": "Mask Preview (RMBG) \ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageMaskConvert": {"input": {"required": {}, "optional": {"image": ["IMAGE"], "mask": ["MASK"], "mask_channel": [["alpha", "red", "green", "blue"], {"default": "alpha"}]}}, "input_order": {"required": [], "optional": ["image", "mask", "mask_channel"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "AILab_ImageMaskConvert", 
"display_name": "Image/Mask Converter (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskEnhancer": {"input": {"required": {"mask": ["MASK", {"tooltip": "Input mask to be processed."}]}, "optional": {"sensitivity": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Adjust the strength of mask detection (higher values result in more aggressive detection)."}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Specify the amount of blur to apply to the mask edges (0 for no blur, higher values for more blur)."}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Adjust the mask boundary (positive values expand the mask, negative values shrink it)."}], "smooth": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 128.0, "step": 0.5, "tooltip": "Smooth the mask edges (0 for no smoothing, higher values create smoother edges)."}], "fill_holes": ["BOOLEAN", {"default": false, "tooltip": "Enable to fill holes in the mask."}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Enable to invert the mask output (useful for certain effects)."}]}}, "input_order": {"required": ["mask"], "optional": ["sensitivity", "mask_blur", "mask_offset", "smooth", "fill_holes", "invert_output"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "AILab_MaskEnhancer", "display_name": "Mask Enhancer (RMBG) \ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskCombiner": {"input": {"required": {"mask_1": ["MASK"], "mode": [["combine", "intersection", "difference"], {"default": 
"combine"}]}, "optional": {"mask_2": ["MASK", {"default": null}], "mask_3": ["MASK", {"default": null}], "mask_4": ["MASK", {"default": null}]}}, "input_order": {"required": ["mask_1", "mode"], "optional": ["mask_2", "mask_3", "mask_4"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "AILab_MaskCombiner", "display_name": "Mask Combiner (RMBG) \ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageCombiner": {"input": {"required": {"foreground": ["IMAGE"], "background": ["IMAGE"], "mode": [["normal", "multiply", "screen", "overlay", "add", "subtract"], {"default": "normal"}], "foreground_opacity": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "foreground_scale": ["FLOAT", {"default": 1.0, "min": 0.1, "max": 5.0, "step": 0.05}], "position_x": ["INT", {"default": 50, "min": 0, "max": 100, "step": 1}], "position_y": ["INT", {"default": 50, "min": 0, "max": 100, "step": 1}]}, "optional": {"foreground_mask": ["MASK", {"default": null}]}}, "input_order": {"required": ["foreground", "background", "mode", "foreground_opacity", "foreground_scale", "position_x", "position_y"], "optional": ["foreground_mask"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "WIDTH", "HEIGHT"], "name": "AILab_ImageCombiner", "display_name": "Image Combiner (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskExtractor": {"input": {"required": {"image": ["IMAGE"], "mode": [["extract_masked_area", "apply_mask", "invert_mask"], {"default": "extract_masked_area"}], "background": [["Alpha", 
"original", "Color"], {"default": "Alpha", "tooltip": "Choose background type"}], "background_color": ["COLORCODE", {"default": "#FFFFFF", "tooltip": "Choose background color (Alpha = transparent)"}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["image", "mode", "background", "background_color"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "AILab_MaskExtractor", "display_name": "Mask Extractor (RMBG) \ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageStitch": {"input": {"required": {"image1": ["IMAGE"], "stitch_mode": [["right", "down", "left", "up", "2x2", "kontext_mode"], {"default": "right", "tooltip": "Mode for stitching images together"}], "match_image_size": ["BOOLEAN", {"default": true, "tooltip": "If True, resize image2 to match image1's aspect ratio"}], "megapixels": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 16.0, "step": 0.01, "tooltip": "Target megapixels for final output (0 = no limit, overrides max_width/max_height)"}], "max_width": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8, "tooltip": "Maximum width of output image (0 = no limit, ignored if megapixels > 0)"}], "max_height": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8, "tooltip": "Maximum height of output image (0 = no limit, ignored if megapixels > 0)"}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos", "tooltip": "Upscaling method for all resize operations"}], "spacing_width": ["INT", {"default": 0, "min": 0, "max": 512, "step": 1, "tooltip": "Width of spacing between images"}], "background_color": ["COLORCODE", {"default": "#FFFFFF", "tooltip": "Color for spacing between images and padding background"}]}, "optional": {"image2": ["IMAGE"], 
"image3": ["IMAGE"], "image4": ["IMAGE"]}}, "input_order": {"required": ["image1", "stitch_mode", "match_image_size", "megapixels", "max_width", "max_height", "upscale_method", "spacing_width", "background_color"], "optional": ["image2", "image3", "image4"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "WIDTH", "HEIGHT"], "name": "AILab_ImageStitch", "display_name": "Image Stitch (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageCrop": {"input": {"required": {"image": ["IMAGE"], "width": ["INT", {"default": 256, "min": 0, "max": 16384, "step": 8, "tooltip": "Width of the crop region in pixels. Will be clamped to image width."}], "height": ["INT", {"default": 256, "min": 0, "max": 16384, "step": 8, "tooltip": "Height of the crop region in pixels. Will be clamped to image height."}], "x_offset": ["INT", {"default": 0, "min": -99999, "step": 1, "tooltip": "Horizontal offset (in pixels) added to the crop position. Positive values move right, negative left."}], "y_offset": ["INT", {"default": 0, "min": -99999, "step": 1, "tooltip": "Vertical offset (in pixels) added to the crop position. Positive values move down, negative up."}], "split": ["BOOLEAN", {"default": false, "tooltip": "If True, output the cropped region and the rest of the image with the crop area set to zero. If False, the rest is a zero image."}], "position": [["top-left", "top-center", "top-right", "right-center", "bottom-right", "bottom-center", "bottom-left", "left-center", "center"], {"tooltip": "Anchor position for the crop region. 
Determines where the crop is placed relative to the image."}]}}, "input_order": {"required": ["image", "width", "height", "x_offset", "y_offset", "split", "position"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE"], "output_is_list": [false, false], "output_name": ["CROP", "REST"], "name": "AILab_ImageCrop", "display_name": "Image Crop (RMBG) \ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ICLoRAConcat": {"input": {"required": {"object_image": ["IMAGE", {"tooltip": "The main image to be used as the foreground (object) in the concatenation.\nIf the image has 4 channels (RGBA), the alpha channel will be automatically extracted and used as the object mask if no mask is provided."}], "layout": [["top-bottom", "left-right"], {"default": "left-right", "tooltip": "The direction in which to concatenate the images: top-bottom or left-right."}], "custom_size": ["INT", {"default": 0, "max": 16384, "min": 0, "step": 8, "tooltip": "If 0, the output image size is unchanged. Otherwise, sets the base image height (for left-right) or base image width (for top-bottom) in pixels for the concatenation. The object image will be scaled proportionally to match the base image in the concatenation direction."}]}, "optional": {"object_mask": ["MASK", {"tooltip": "Mask for the object_image. Defines the region of the object_image to be blended into the base_image."}], "base_image": ["IMAGE", {"tooltip": "The background image to be concatenated with the object_image.\nIf the image has 4 channels (RGBA), the alpha channel will be automatically extracted and used as the base mask if no mask is provided."}], "base_mask": ["MASK", {"tooltip": "Mask for the base_image. 
Defines the region of the base_image to be blended with the object_image."}]}}, "input_order": {"required": ["object_image", "layout", "custom_size"], "optional": ["object_mask", "base_image", "base_mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "MASK", "INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["IMAGE", "OBJECT_MASK", "BASE_MASK", "WIDTH", "HEIGHT", "X", "Y"], "name": "AILab_ICLoRAConcat", "display_name": "IC LoRA Concat (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_CropObject": {"input": {"optional": {"image": ["IMAGE"], "mask": ["MASK"], "padding": ["INT", {"default": 0, "min": 0, "max": 256, "step": 1}]}}, "input_order": {"optional": ["image", "mask", "padding"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "AILab_CropObject", "display_name": "Crop To Object (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageCompare": {"input": {"required": {"text1": ["STRING", {"default": "Image 1"}], "text2": ["STRING", {"default": "Image 2"}], "text3": ["STRING", {"default": "Image 3"}], "size_base": [["largest", "smallest", "image1", "image2", "image3"], {"default": "largest"}], "text_color": ["COLORCODE", {"default": "#000000"}], "bg_color": ["COLORCODE", {"default": "#FFFFFF"}]}, "optional": {"image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"]}}, "input_order": {"required": ["text1", "text2", "text3", "size_base", "text_color", "bg_color"], "optional": ["image1", "image2", "image3"]}, 
"is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "AILab_ImageCompare", "display_name": "Side By Side Compare (RMBG) \ud83d\uddbc\ufe0f\ud83d\uddbc\ufe0f", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ColorInput": {"input": {"required": {"preset": [["black", "white", "red", "green", "blue", "yellow", "cyan", "magenta", "gray", "silver", "maroon", "olive", "purple", "teal", "navy", "orange", "pink", "brown", "violet", "indigo", "light_gray", "dark_gray", "light_blue", "dark_blue", "light_green", "dark_green"]], "color": ["STRING", {"default": "", "placeholder": "Enter color code (e.g. #FF0000 or #F00)"}]}}, "input_order": {"required": ["preset", "color"]}, "is_input_list": false, "output": ["COLORCODE"], "output_is_list": [false], "output_name": ["COLOR"], "name": "AILab_ColorInput", "display_name": "Color Input (RMBG) \ud83c\udfa8", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\udee0\ufe0fUTIL/\ud83d\udd04IO", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageResize": {"input": {"required": {"image": ["IMAGE"], "custom_width": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "megapixels": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 16.0, "step": 0.01}], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}], "resize_mode": [["longest_side", "shortest_side"], {"default": "longest_side"}], "resize_value": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], {"default": "lanczos"}], "device": [["cpu", "gpu"], {"default": "cpu"}], "divisible_by": ["INT", 
{"default": 2, "min": 1, "max": 512, "step": 1}], "output_mode": [["stretch", "pad", "pad_edge", "pad_edge_pixel", "crop", "pillarbox_blur"], {"default": "stretch"}], "crop_position": [["center", "top", "bottom", "left", "right"], {"default": "center"}], "pad_color": ["COLORCODE", {"default": "#FFFFFF", "tooltip": "Padding color (hex)"}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["image", "custom_width", "custom_height", "megapixels", "scale_by", "resize_mode", "resize_value", "upscale_method", "device", "divisible_by", "output_mode", "crop_position", "pad_color"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "MASK", "WIDTH", "HEIGHT"], "name": "AILab_ImageResize", "display_name": "Image Resize (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageToList": {"input": {"optional": {"image_1": ["IMAGE"], "image_2": ["IMAGE"], "image_3": ["IMAGE"], "image_4": ["IMAGE"], "image_5": ["IMAGE"], "image_6": ["IMAGE"]}, "required": {"resize_mode": [["off", "crop", "fit"], {"default": "crop"}]}}, "input_order": {"optional": ["image_1", "image_2", "image_3", "image_4", "image_5", "image_6"], "required": ["resize_mode"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT", "INT"], "output_is_list": [true, false, false, false], "output_name": ["IMAGE", "WIDTH", "HEIGHT", "BATCH_SIZE"], "name": "AILab_ImageToList", "display_name": "Image to List (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_MaskToList": {"input": {"optional": {"mask_1": ["MASK"], "mask_2": ["MASK"], 
"mask_3": ["MASK"], "mask_4": ["MASK"], "mask_5": ["MASK"], "mask_6": ["MASK"]}, "required": {"resize_mode": [["off", "crop", "fit"], {"default": "off"}]}}, "input_order": {"optional": ["mask_1", "mask_2", "mask_3", "mask_4", "mask_5", "mask_6"], "required": ["resize_mode"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [true], "output_name": ["MASK"], "name": "AILab_MaskToList", "display_name": "Mask to List (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\udda0MASK", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ImageMaskToList": {"input": {"optional": {"image_1": ["IMAGE"], "mask_1": ["MASK"], "image_2": ["IMAGE"], "mask_2": ["MASK"], "image_3": ["IMAGE"], "mask_3": ["MASK"], "image_4": ["IMAGE"], "mask_4": ["MASK"], "image_5": ["IMAGE"], "mask_5": ["MASK"], "image_6": ["IMAGE"], "mask_6": ["MASK"]}, "required": {"resize_mode": [["off", "crop", "fit"], {"default": "crop"}]}}, "input_order": {"optional": ["image_1", "mask_1", "image_2", "mask_2", "image_3", "mask_3", "image_4", "mask_4", "image_5", "mask_5", "image_6", "mask_6"], "required": ["resize_mode"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT", "INT"], "output_is_list": [true, true, false, false, false], "output_name": ["IMAGE", "MASK", "WIDTH", "HEIGHT", "BATCH_SIZE"], "name": "AILab_ImageMaskToList", "display_name": "Image and Mask to List (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83d\uddbc\ufe0fIMAGE", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_ReferenceLatentMask": {"input": {"required": {"conditioning": ["CONDITIONING", {"tooltip": "Base conditioning input for inpainting task"}], "latent": ["LATENT", {"tooltip": "Encoded latent from VAE"}], "mask": ["MASK", {"tooltip": "Area to inpaint (white regions)"}], "expand": ["INT", {"default": 5, "min": -64, 
"max": 64, "step": 1, "tooltip": "Grow mask (+) or shrink mask (-)"}], "blur": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 64.0, "step": 0.1, "tooltip": "Soften mask edges"}], "mask_only": ["BOOLEAN", {"default": true, "tooltip": "Only generate content in masked area"}]}}, "input_order": {"required": ["conditioning", "latent", "mask", "expand", "blur", "mask_only"]}, "is_input_list": false, "output": ["CONDITIONING", "LATENT", "MASK"], "output_is_list": [false, false, false], "output_name": ["CONDITIONING", "LATENT", "MASK"], "name": "AILab_ReferenceLatentMask", "display_name": "Reference Latent Mask (RMBG) \ud83d\uddbc\ufe0f\ud83c\udfad", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83c\udfadInpaint", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_LamaRemover": {"input": {"required": {"images": ["IMAGE", {"tooltip": "Input images to be processed"}], "masks": ["MASK", {"tooltip": "Masks defining areas to be removed (white=remove)"}], "removal_strength": ["INT", {"default": 230, "min": 0, "max": 255, "step": 1, "display": "slider", "tooltip": "Strength of the removal effect (higher values increase the effect area)"}], "edge_smoothness": ["INT", {"default": 8, "min": 0, "max": 20, "step": 1, "display": "slider", "tooltip": "Controls edge smoothness (higher values create smoother transitions)"}]}}, "input_order": {"required": ["images", "masks", "removal_strength", "edge_smoothness"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "AILab_LamaRemover", "display_name": "Lama Remover (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RMBG": {"input": {"required": {"image": ["IMAGE", {"tooltip": "Input image to be processed for background removal."}], "model": 
[["RMBG-2.0", "INSPYRENET", "BEN", "BEN2"], {"tooltip": "Select the background removal model to use (RMBG-2.0, INSPYRENET, BEN, BEN2)."}]}, "optional": {"sensitivity": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Adjust the strength of mask detection (higher values result in more aggressive detection)."}], "process_res": ["INT", {"default": 1024, "min": 256, "max": 2048, "step": 8, "tooltip": "Set the processing resolution (higher values require more VRAM and may increase processing time)."}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Specify the amount of blur to apply to the mask edges (0 for no blur, higher values for more blur)."}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Adjust the mask boundary (positive values expand the mask, negative values shrink it)."}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Enable to invert both the image and mask output (useful for certain effects)."}], "refine_foreground": ["BOOLEAN", {"default": false, "tooltip": "Use Fast Foreground Colour Estimation to optimize transparent background"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose output type: Alpha (transparent) or Color (custom background color)."}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Pick background color (supports alpha, use color picker)."}]}}, "input_order": {"required": ["image", "model"], "optional": ["sensitivity", "process_res", "mask_blur", "mask_offset", "invert_output", "refine_foreground", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "RMBG", "display_name": "Remove Background (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "SAM2Segment": {"input": {"required": {"image": ["IMAGE"], "prompt": ["STRING", {"default": "", "multiline": true, "placeholder": "Object to segment", "tooltip": "Enter text description of object to segment"}], "sam2_model": [["sam2.1_hiera_tiny", "sam2.1_hiera_small", "sam2.1_hiera_base_plus", "sam2.1_hiera_large"], {"default": "sam2.1_hiera_tiny", "tooltip": "SAM2 model size: Tiny (fastest) to Large (best quality)"}], "dino_model": [["GroundingDINO_SwinT_OGC (694MB)", "GroundingDINO_SwinB (938MB)"], {"default": "GroundingDINO_SwinT_OGC (694MB)", "tooltip": "GroundingDINO model for text-to-box detection"}], "device": [["Auto", "CPU", "GPU"], {"default": "Auto", "tooltip": "Auto: smart detection, CPU: force CPU, GPU: force GPU"}]}, "optional": {"threshold": ["FLOAT", {"default": 0.35, "min": 0.05, "max": 0.95, "step": 0.01, "tooltip": "Detection threshold (higher = more strict)"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur mask edges (0 = disabled)"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/shrink mask (positive = expand)"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert the mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Background type"}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Background color (when not Alpha)"}]}}, "input_order": {"required": ["image", "prompt", "sam2_model", "dino_model", "device"], "optional": ["threshold", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "SAM2Segment", "display_name": "SAM2 Segmentation (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": 
"\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SAM3Segment": {"input": {"required": {"image": ["IMAGE"], "prompt": ["STRING", {"default": "", "multiline": true, "placeholder": "Describe the concept"}], "output_mode": [["Merged", "Separate"], {"default": "Merged"}], "confidence_threshold": ["FLOAT", {"default": 0.5, "min": 0.05, "max": 0.95, "step": 0.01}]}, "optional": {"max_segments": ["INT", {"default": 0, "min": 0, "max": 128, "step": 1}], "segment_pick": ["INT", {"default": 0, "min": 0, "max": 128, "step": 1}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1}], "device": [["Auto", "CPU", "GPU"], {"default": "Auto"}], "invert_output": ["BOOLEAN", {"default": false}], "unload_model": ["BOOLEAN", {"default": false}], "background": [["Alpha", "Color"], {"default": "Alpha"}], "background_color": ["COLORCODE", {"default": "#222222"}]}}, "input_order": {"required": ["image", "prompt", "output_mode", "confidence_threshold"], "optional": ["max_segments", "segment_pick", "mask_blur", "mask_offset", "device", "invert_output", "unload_model", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "SAM3Segment", "display_name": "SAM3 Segmentation (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_SDMatte": {"input": {"required": {"image": ["IMAGE"], "model": [["SDMatte", "SDMatte_plus"], {"default": "SDMatte", "tooltip": "SDMatte model variant: Standard or Plus version"}], "device": [["Auto", "CPU", "GPU"], {"default": "Auto", "tooltip": "Auto: smart detection, CPU: force CPU, GPU: force GPU"}], "process_res": ["INT", 
{"default": 1024, "min": 256, "max": 2048, "step": 8, "tooltip": "Processing resolution: higher = better quality but slower"}]}, "optional": {"mask": ["MASK", {"tooltip": "Mask: White=foreground, Black=background. If omitted and image has alpha, alpha will be used."}], "transparent_object": ["BOOLEAN", {"default": true, "tooltip": "Whether input image contains transparent objects"}], "mask_refine": ["BOOLEAN", {"default": true, "tooltip": "Enable mask refinement using mask constraints"}], "sensitivity": ["FLOAT", {"default": 0.9, "min": 0.1, "max": 1.0, "step": 0.1, "tooltip": "Sensitivity for mask constraint (0.1-1.0): higher = more strict"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Blur mask edges (0 = disabled)"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/shrink mask (positive = expand)"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert the mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Background type for output"}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Background color (when not Alpha)"}]}}, "input_order": {"required": ["image", "model", "device", "process_res"], "optional": ["mask", "transparent_object", "mask_refine", "sensitivity", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "AILab_SDMatte", "display_name": "SDMatte Matting (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Segment": {"input": {"required": {"image": ["IMAGE"], "prompt": ["STRING", {"default": "", "multiline": true, "placeholder": "Object to segment", "tooltip": "Enter the 
object or scene you want to segment. Use tag-style or natural language for more detailed prompts."}], "sam_model": [["sam_vit_h (2.56GB)", "sam_vit_l (1.25GB)", "sam_vit_b (375MB)", "sam_hq_vit_h (2.57GB)", "sam_hq_vit_l (1.25GB)", "sam_hq_vit_b (379MB)"]], "dino_model": [["GroundingDINO_SwinT_OGC (694MB)", "GroundingDINO_SwinB (938MB)"]]}, "optional": {"threshold": ["FLOAT", {"default": 0.3, "min": 0.05, "max": 0.95, "step": 0.01, "tooltip": "Adjust mask detection strength (higher = more strict)"}], "mask_blur": ["INT", {"default": 0, "min": 0, "max": 64, "step": 1, "tooltip": "Apply Gaussian blur to mask edges (0 = disabled)"}], "mask_offset": ["INT", {"default": 0, "min": -64, "max": 64, "step": 1, "tooltip": "Expand/Shrink mask boundary (positive = expand, negative = shrink)"}], "invert_output": ["BOOLEAN", {"default": false, "tooltip": "Invert the mask output"}], "background": [["Alpha", "Color"], {"default": "Alpha", "tooltip": "Choose background type"}], "background_color": ["COLORCODE", {"default": "#222222", "tooltip": "Choose background color (Alpha = transparent)"}]}}, "input_order": {"required": ["image", "prompt", "sam_model", "dino_model"], "optional": ["threshold", "mask_blur", "mask_offset", "invert_output", "background", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "IMAGE"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "MASK_IMAGE"], "name": "Segment", "display_name": "Segmentation V1 (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_YoloV8": {"input": {"required": {"images": ["IMAGE"], "yolo_model": [["bbox/Eyeful_v2-Paired.pt", "bbox/Eyes.pt", "bbox/face_yolov8m.pt", "bbox/face_yolov8n.pt", "bbox/face_yolov8n_v2.pt", "bbox/face_yolov8s.pt", "bbox/hand_yolov8n.pt", 
"bbox/hand_yolov8s.pt", "bbox/lips_v1.pt", "bbox/yolov8s.pt", "face_yolov8n.pt", "segm/deepfashion2_yolov8s-seg.pt", "segm/face_yolov8m-seg_60.pt", "segm/face_yolov8n-seg2_60.pt", "segm/facial_features_yolo8x-seg.pt", "segm/flowers_seg_yolov8model.pt", "segm/hair_yolov8n-seg_60.pt", "segm/person_yolov8m-seg.pt", "segm/person_yolov8n-seg.pt", "segm/person_yolov8s-seg.pt", "segm/skin_yolov8m-seg_400.pt", "segm/skin_yolov8n-seg_400.pt", "segm/skin_yolov8n-seg_800.pt", "segm/yolov8_butterfly_custom.pt", "segm/yolov8l-seg.pt", "segm/yolov8m-seg.pt", "segm/yolov8n-seg.pt", "segm/yolov8s-seg.pt", "segm/yolov8x-seg.pt"], {"default": "bbox/Eyeful_v2-Paired.pt", "tooltip": "YOLOv8 weights stored under /workspace/ComfyUI/models/ultralytics. Advanced controls available on YOLOv8 Adv."}], "mask_count": [["all", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], {"default": "all", "tooltip": "Merge this many detections. 'all' merges everything (or just the selected index when specified)."}]}, "optional": {"select_mask_index": [["none", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], {"default": "none", "tooltip": "1-based index of the first mask to keep. 
Use 'none' to start from the first detection."}]}}, "input_order": {"required": ["images", "yolo_model", "mask_count"], "optional": ["select_mask_index"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "MASK"], "output_is_list": [false, false, false], "output_name": ["ANNOTATED_IMAGE", "MASK", "MASK_LIST"], "name": "AILab_YoloV8", "display_name": "YOLOv8 (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AILab_YoloV8Adv": {"input": {"required": {"images": ["IMAGE"], "yolo_model": [["bbox/Eyeful_v2-Paired.pt", "bbox/Eyes.pt", "bbox/face_yolov8m.pt", "bbox/face_yolov8n.pt", "bbox/face_yolov8n_v2.pt", "bbox/face_yolov8s.pt", "bbox/hand_yolov8n.pt", "bbox/hand_yolov8s.pt", "bbox/lips_v1.pt", "bbox/yolov8s.pt", "face_yolov8n.pt", "segm/deepfashion2_yolov8s-seg.pt", "segm/face_yolov8m-seg_60.pt", "segm/face_yolov8n-seg2_60.pt", "segm/facial_features_yolo8x-seg.pt", "segm/flowers_seg_yolov8model.pt", "segm/hair_yolov8n-seg_60.pt", "segm/person_yolov8m-seg.pt", "segm/person_yolov8n-seg.pt", "segm/person_yolov8s-seg.pt", "segm/skin_yolov8m-seg_400.pt", "segm/skin_yolov8n-seg_400.pt", "segm/skin_yolov8n-seg_800.pt", "segm/yolov8_butterfly_custom.pt", "segm/yolov8l-seg.pt", "segm/yolov8m-seg.pt", "segm/yolov8n-seg.pt", "segm/yolov8s-seg.pt", "segm/yolov8x-seg.pt"], {"default": "bbox/Eyeful_v2-Paired.pt", "tooltip": "YOLOv8 weights stored under /workspace/ComfyUI/models/ultralytics (subfolders allowed)."}], "mask_count": [["all", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], {"default": "all", "tooltip": "Merge this many detections. 'all' merges everything (or just the selected index when specified)."}]}, "optional": {"select_mask_index": [["none", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], {"default": "none", "tooltip": "1-based index of the first mask to keep. 
Use 'none' to start from the first detection."}], "conf": ["FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Confidence threshold forwarded to Ultralytics."}], "iou": ["FLOAT", {"default": 0.45, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "IOU used during NMS."}], "classes": ["STRING", {"default": "", "placeholder": "e.g. 0,2,5-7", "tooltip": "Comma list or ranges of class IDs; empty keeps every class."}], "device": [["auto", "cuda", "cpu", "mps"], {"default": "auto", "tooltip": "Force a device or auto-detect CUDA \u2192 MPS \u2192 CPU."}], "max_det": ["INT", {"default": 300, "min": 1, "max": 1000, "step": 1, "tooltip": "Maximum detections per image."}], "retina_masks": ["BOOLEAN", {"default": true, "tooltip": "Use high-resolution masks (Ultralytics retina_masks flag)."}], "agnostic_nms": ["BOOLEAN", {"default": false, "tooltip": "Enable class-agnostic NMS."}]}}, "input_order": {"required": ["images", "yolo_model", "mask_count"], "optional": ["select_mask_index", "conf", "iou", "classes", "device", "max_det", "retina_masks", "agnostic_nms"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "MASK"], "output_is_list": [false, false, false], "output_name": ["ANNOTATED_IMAGE", "MASK", "MASK_LIST"], "name": "AILab_YoloV8Adv", "display_name": "YOLOv8 Adv (RMBG)", "description": "", "python_module": "custom_nodes.ComfyUI-RMBG", "category": "\ud83e\uddeaAILab/\ud83e\uddfdRMBG", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VideoCombine": {"input": {"required": {"images": ["IMAGE"], "frame_rate": ["FLOAT", {"default": 8, "min": 1, "step": 1}], "loop_count": ["INT", {"default": 0, "min": 0, "max": 100, "step": 1}], "filename_prefix": ["STRING", {"default": "AnimateDiff"}], "format": [["image/gif", "image/webp", "video/16bit-png", "video/8bit-png", "video/ProRes", "video/av1-webm", "video/ffmpeg-gif", "video/ffv1-mkv", "video/h264-mp4", "video/h265-mp4", "video/nvenc_av1-mp4", "video/nvenc_h264-mp4", 
"video/nvenc_hevc-mp4", "video/webm"], {"formats": {"video/ProRes": [["profile", ["lt", "standard", "hq", "4444", "4444xq"], {"default": "hq"}]], "video/av1-webm": [["pix_fmt", ["yuv420p10le", "yuv420p"]], ["crf", "INT", {"default": 23, "min": 0, "max": 100, "step": 1}], ["input_color_depth", ["8bit", "16bit"]], ["save_metadata", "BOOLEAN", {"default": true}]], "video/ffmpeg-gif": [["dither", ["bayer", "heckbert", "floyd_steinberg", "sierra2", "sierra2_4a", "sierra3", "burkes", "atkinson", "none"], {"default": "sierra2_4a"}, "[0:v] split [a][b]; [a] palettegen=reserve_transparent=on:transparency_color=ffffff [p]; [b][p] paletteuse=dither=$val"]], "video/ffv1-mkv": [["level", ["0", "1", "3"], {"default": "3"}], ["coder", ["0", "1", "2"], {"default": "1"}], ["context", ["0", "1"], {"default": "1"}], ["gop_size", "INT", {"default": 1, "min": 1, "max": 300, "step": 1}], ["slices", ["4", "6", "9", "12", "16", "20", "24", "30"], {"default": "16"}], ["slicecrc", ["0", "1"], {"default": "1"}], ["pix_fmt", ["rgba64le", "bgra", "yuv420p", "yuv422p", "yuv444p", "yuva420p", "yuva422p", "yuva444p", "yuv420p10le", "yuv422p10le", "yuv444p10le", "yuv420p12le", "yuv422p12le", "yuv444p12le", "yuv420p14le", "yuv422p14le", "yuv444p14le", "yuv420p16le", "yuv422p16le", "yuv444p16le", "gray", "gray10le", "gray12le", "gray16le"], {"default": "rgba64le"}], ["save_metadata", "BOOLEAN", {"default": true}], ["trim_to_audio", "BOOLEAN", {"default": false}]], "video/h264-mp4": [["pix_fmt", ["yuv420p", "yuv420p10le"]], ["crf", "INT", {"default": 19, "min": 0, "max": 100, "step": 1}], ["save_metadata", "BOOLEAN", {"default": true}], ["trim_to_audio", "BOOLEAN", {"default": false}]], "video/h265-mp4": [["pix_fmt", ["yuv420p10le", "yuv420p"]], ["crf", "INT", {"default": 22, "min": 0, "max": 100, "step": 1}], ["save_metadata", "BOOLEAN", {"default": true}]], "video/nvenc_av1-mp4": [["pix_fmt", ["yuv420p", "p010le"]], ["bitrate", "INT", {"default": 10, "min": 1, "max": 999, "step": 1}], ["megabit", 
"BOOLEAN", {"default": true}], ["save_metadata", "BOOLEAN", {"default": true}]], "video/nvenc_h264-mp4": [["pix_fmt", ["yuv420p", "p010le"]], ["bitrate", "INT", {"default": 10, "min": 1, "max": 999, "step": 1}], ["megabit", "BOOLEAN", {"default": true}], ["save_metadata", "BOOLEAN", {"default": true}]], "video/nvenc_hevc-mp4": [["pix_fmt", ["yuv420p", "p010le"]], ["bitrate", "INT", {"default": 10, "min": 1, "max": 999, "step": 1}], ["megabit", "BOOLEAN", {"default": true}], ["save_metadata", "BOOLEAN", {"default": true}]], "video/webm": [["pix_fmt", ["yuv420p", "yuva420p"]], ["crf", "INT", {"default": 20, "min": 0, "max": 100, "step": 1}], ["save_metadata", "BOOLEAN", {"default": true}], ["trim_to_audio", "BOOLEAN", {"default": false}]], "image/webp": [["lossless", "BOOLEAN", {"default": true}]]}}], "pingpong": ["BOOLEAN", {"default": false}], "save_output": ["BOOLEAN", {"default": true}]}, "optional": {"audio": ["AUDIO"], "meta_batch": ["VHS_BatchManager"], "vae": ["VAE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["images", "frame_rate", "loop_count", "filename_prefix", "format", "pingpong", "save_output"], "optional": ["audio", "meta_batch", "vae"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["VHS_FILENAMES"], "output_is_list": [false], "output_name": ["Filenames"], "name": "VHS_VideoCombine", "display_name": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Video Combine \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Combine an image sequence into a video
[-]
Inputs:
[-]
images: The images to be turned into a video
[-]
audio: (optional) audio to add to the video
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long image sequences into sub batches. See the documentation for Meta Batch Manager
[-]
vae: (optional) If provided, the node will take latents as input instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences
Unlike on Load Video, this isn't always a strict upgrade over using a standalone VAE Decode.
If you have multiple Video Combine outputs, then the VAE decode will be performed for each output node increasing execution time
If you make any change to output settings on the Video Combine (such as changing the output format), the VAE decode will be performed again as the decoded result is (by design) not cached
[-]
Widgets:
[-]
frame_rate: The frame rate which will be used for the output video. Consider converting this to an input and connecting this to a Load Video with Video Info(Loaded)->fps. When including audio, failure to properly set this will result in audio desync
[-]
loop_count: The number of additional times the video should repeat. Can cause performance issues when used with long (100+ frames) sequences
[-]
filename_prefix: A prefix to add to the name of the output filename. This can include subfolders or format strings.
[-]
format: The output format to use. Formats starting with, 'image' are saved with PIL, but formats starting with 'video' utilize the video_formats system. 'video' options require ffmpeg and selecting one frequently adds additional options to the node.
[-]
pingpong: Play the video normally, then repeat the video in reverse so that it 'pingpongs' back and forth. This is frequently used to minimize the appearance of skips on very short animations.
[-]
save_output: Specifies if output files should be saved to the output folder, or the temporary output folder
[-]
videopreview: Displays a preview for the processed result. If advanced previews is enabled, the output is always converted to a format viewable from the browser. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.
[-]
Common Format Widgets:
[-]
crf: Determines how much to prioritize quality over filesize. Numbers vary between formats, but on each format that includes it, the default value provides visually lossless output
[-]
pix_fmt: The pixel format to use for output. Alternative options will often have higher quality at the cost of increased file size and reduced compatibility with external software.
[-]
yuv420p: The most common and default format
[-]
yuv420p10le: Use 10 bit color depth. This can improve color quality when combined with 16bit input color depth
[-]
yuva420p: Include transparency in the output video
[-]
input_color_depth: VHS supports outputting 16bit images. While this produces higher quality output, the difference usually isn't visible without postprocessing and it significantly increases file size and processing time.
[-]
save_metadata: Determines if metadata for the workflow should be included in the output video file
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadVideo": {"input": {"required": {"video": [[]], "force_rate": ["FLOAT", {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}], "custom_width": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "frame_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1, "disable": 0}], "skip_first_frames": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}]}, "optional": {"meta_batch": ["VHS_BatchManager"], "vae": ["VAE"], "format": [["None", "AnimateDiff", "Mochi", "LTXV", "Hunyuan", "Cosmos", "Wan"], {"default": "AnimateDiff", "formats": {"None": {}, "AnimateDiff": {"target_rate": 8, "dim": [8, 0, 512, 512]}, "Mochi": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [6, 1]}, "LTXV": {"target_rate": 24, "dim": [32, 0, 768, 512], "frames": [8, 1]}, "Hunyuan": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [4, 1]}, "Cosmos": {"target_rate": 24, "dim": [16, 0, 1280, 704], "frames": [8, 1]}, "Wan": {"target_rate": 16, "dim": [8, 0, 832, 480], "frames": [4, 1]}}}]}, "hidden": {"force_size": "STRING", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["video", "force_rate", "custom_width", "custom_height", "frame_load_cap", "skip_first_frames", "select_every_nth"], "optional": ["meta_batch", "vae", "format"], "hidden": ["force_size", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "INT", "AUDIO", "VHS_VIDEOINFO"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "frame_count", "audio", "video_info"], "name": "VHS_LoadVideo", "display_name": "Load Video (Upload) 
\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Video \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a video from the input folder
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
vae: (optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences
Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
frame_count: The number of images just returned
[-]
audio: The audio from the loaded video
[-]
video_info: Exposes additional info about the video such as the source frame rate, or the total length
[-]
LATENT: The loaded images pre-converted to latents. Only available when a vae is connected
[-]
Widgets:
[-]
video: The video file to be loaded. Lists all files with a video extension in the ComfyUI/Input folder
[-]
force_rate: Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.
[-]
force_size: Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.
[-]
custom_width: Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set
[-]
custom_height: Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set
[-]
frame_load_cap: The maximum number of frames to load. If 0, all frames are loaded.
[-]
skip_first_frames: A number of frames which are discarded before producing output.
[-]
select_every_nth: Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames
[-]
format: Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.
[-]
choose video to upload: An upload button is provided to upload local files to the input folder
[-]
videopreview: Displays a preview for the selected video input. If advanced previews is enabled, this preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadVideoPath": {"input": {"required": {"video": ["STRING", {"placeholder": "X://insert/path/here.mp4", "vhs_path_extensions": ["webm", "mp4", "mkv", "gif", "mov"]}], "force_rate": ["FLOAT", {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}], "custom_width": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "frame_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1, "disable": 0}], "skip_first_frames": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}]}, "optional": {"meta_batch": ["VHS_BatchManager"], "vae": ["VAE"], "format": [["None", "AnimateDiff", "Mochi", "LTXV", "Hunyuan", "Cosmos", "Wan"], {"default": "AnimateDiff", "formats": {"None": {}, "AnimateDiff": {"target_rate": 8, "dim": [8, 0, 512, 512]}, "Mochi": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [6, 1]}, "LTXV": {"target_rate": 24, "dim": [32, 0, 768, 512], "frames": [8, 1]}, "Hunyuan": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [4, 1]}, "Cosmos": {"target_rate": 24, "dim": [16, 0, 1280, 704], "frames": [8, 1]}, "Wan": {"target_rate": 16, "dim": [8, 0, 832, 480], "frames": [4, 1]}}}]}, "hidden": {"force_size": "STRING", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["video", "force_rate", "custom_width", "custom_height", "frame_load_cap", "skip_first_frames", "select_every_nth"], "optional": ["meta_batch", "vae", "format"], "hidden": ["force_size", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "INT", "AUDIO", "VHS_VIDEOINFO"], "output_is_list": [false, false, false, false], "output_name": 
["IMAGE", "frame_count", "audio", "video_info"], "name": "VHS_LoadVideoPath", "display_name": "Load Video (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Video (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a video from an arbitrary path
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
vae: (optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences
Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
frame_count: The number of images just returned
[-]
audio: The audio from the loaded video
[-]
video_info: Exposes additional info about the video such as the source frame rate, or the total length
[-]
LATENT: The loaded images pre-converted to latents. Only available when a vae is connected
[-]
Widgets:
[-]
video: The video file to be loaded.
You can also select an image to load it as a single frame
This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories
The current top-most completion may be selected with Tab
You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)
The filter on suggested file types can be disabled by pressing Ctrl+G.
If converted to an input, this functions as a string
[-]
force_rate: Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.
[-]
force_size: Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.
[-]
custom_width: Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set
[-]
custom_height: Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set
[-]
frame_load_cap: The maximum number of frames to load. If 0, all frames are loaded.
[-]
skip_first_frames: A number of frames which are discarded before producing output.
[-]
select_every_nth: Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames
[-]
format: Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.
[-]
videopreview: Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadVideoFFmpeg": {"input": {"required": {"video": [[]], "force_rate": ["FLOAT", {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}], "custom_width": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "frame_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1, "disable": 0}], "start_time": ["FLOAT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 0.001, "widgetType": "VHSTIMESTAMP"}]}, "optional": {"meta_batch": ["VHS_BatchManager"], "vae": ["VAE"], "format": [["None", "AnimateDiff", "Mochi", "LTXV", "Hunyuan", "Cosmos", "Wan"], {"default": "AnimateDiff", "formats": {"None": {}, "AnimateDiff": {"target_rate": 8, "dim": [8, 0, 512, 512]}, "Mochi": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [6, 1]}, "LTXV": {"target_rate": 24, "dim": [32, 0, 768, 512], "frames": [8, 1]}, "Hunyuan": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [4, 1]}, "Cosmos": {"target_rate": 24, "dim": [16, 0, 1280, 704], "frames": [8, 1]}, "Wan": {"target_rate": 16, "dim": [8, 0, 832, 480], "frames": [4, 1]}}}]}, "hidden": {"force_size": "STRING", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["video", "force_rate", "custom_width", "custom_height", "frame_load_cap", "start_time"], "optional": ["meta_batch", "vae", "format"], "hidden": ["force_size", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "AUDIO", "VHS_VIDEOINFO"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "mask", "audio", "video_info"], "name": "VHS_LoadVideoFFmpeg", "display_name": "Load Video FFmpeg (Upload) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Video FFmpeg 
\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a video from the input folder using ffmpeg instead of opencv
Provides faster execution speed, transparency support, and allows specifying start time in seconds
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
vae: (optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences
Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
mask: Transparency data from the loaded video
[-]
audio: The audio from the loaded video
[-]
video_info: Exposes additional info about the video such as the source frame rate, or the total length
[-]
LATENT: The loaded images pre-converted to latents. Only available when a vae is connected
[-]
Widgets:
[-]
video: The video file to be loaded. Lists all files with a video extension in the ComfyUI/Input folder
[-]
force_rate: Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.
[-]
force_size: Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.
[-]
custom_width: Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set
[-]
custom_height: Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set
[-]
frame_load_cap: The maximum number of frames to load. If 0, all frames are loaded.
[-]
start_time: A timestamp, in seconds from the start of the video, to start loading frames from.
[-]
format: Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.
[-]
choose video to upload: An upload button is provided to upload local files to the input folder
[-]
videopreview: Displays a preview for the selected video input. If advanced previews is enabled, this preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadVideoFFmpegPath": {"input": {"required": {"video": ["STRING", {"placeholder": "X://insert/path/here.mp4", "vhs_path_extensions": ["webm", "mp4", "mkv", "gif", "mov"]}], "force_rate": ["FLOAT", {"default": 0, "min": 0, "max": 60, "step": 1, "disable": 0}], "custom_width": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 8192, "disable": 0}], "frame_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1, "disable": 0}], "start_time": ["FLOAT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 0.001, "widgetType": "VHSTIMESTAMP"}]}, "optional": {"meta_batch": ["VHS_BatchManager"], "vae": ["VAE"], "format": [["None", "AnimateDiff", "Mochi", "LTXV", "Hunyuan", "Cosmos", "Wan"], {"default": "AnimateDiff", "formats": {"None": {}, "AnimateDiff": {"target_rate": 8, "dim": [8, 0, 512, 512]}, "Mochi": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [6, 1]}, "LTXV": {"target_rate": 24, "dim": [32, 0, 768, 512], "frames": [8, 1]}, "Hunyuan": {"target_rate": 24, "dim": [16, 0, 848, 480], "frames": [4, 1]}, "Cosmos": {"target_rate": 24, "dim": [16, 0, 1280, 704], "frames": [8, 1]}, "Wan": {"target_rate": 16, "dim": [8, 0, 832, 480], "frames": [4, 1]}}}]}, "hidden": {"force_size": "STRING", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["video", "force_rate", "custom_width", "custom_height", "frame_load_cap", "start_time"], "optional": ["meta_batch", "vae", "format"], "hidden": ["force_size", "unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "AUDIO", "VHS_VIDEOINFO"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "mask", "audio", "video_info"], "name": "VHS_LoadVideoFFmpegPath", 
"display_name": "Load Video FFmpeg (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Video FFmpeg (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a video from an arbitrary path using ffmpeg instead of opencv
Provides faster execution speed, transparency support, and allows specifying start time in seconds
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
vae: (optional) If provided the node will output latents instead of images. This drastically reduces the required RAM (not VRAM) when working with long (100+ frames) sequences
Using this is strongly encouraged unless connecting to a node that requires a blue image connection such as Apply ControlNet
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
mask: Transparency data from the loaded video
[-]
audio: The audio from the loaded video
[-]
video_info: Exposes additional info about the video such as the source frame rate, or the total length
[-]
LATENT: The loaded images pre-converted to latents. Only available when a vae is connected
[-]
Widgets:
[-]
video: The video file to be loaded.
You can also select an image to load it as a single frame
This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories
The current top-most completion may be selected with Tab
You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)
The filter on suggested file types can be disabled by pressing Ctrl+G.
If converted to an input, this functions as a string
[-]
force_rate: Drops or duplicates frames so that the produced output has the target frame rate. Many motion models are trained on videos of a specific frame rate and will give better results if input matches that frame rate. If set to 0, all frames are returned. May give unusual results with inputs that have a variable frame rate like animated gifs. Reducing this value can also greatly reduce the execution time and memory requirements.
[-]
force_size: Previously was used to provide suggested resolutions. Instead, custom_width and custom_height can be disabled by setting to 0.
[-]
custom_width: Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set
[-]
custom_height: Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set
[-]
frame_load_cap: The maximum number of frames to load. If 0, all frames are loaded.
[-]
skip_first_frames: A number of frames which are discarded before producing output.
[-]
select_every_nth: Similar to frame rate. Keeps only the first of every n frames and discards the rest. Has better compatibility with variable frame rate inputs such as gifs. When combined with force_rate, select_every_nth applies after force_rate so the resulting output has a frame rate equivalent to force_rate/select_every_nth. select_every_nth does not apply to skip_first_frames
[-]
format: Updates other widgets so that only values supported by the given format can be entered and provides recommended defaults.
[-]
videopreview: Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the frame_load_cap, force_rate, skip_first_frames, and select_every_nth values chosen. If the video has audio, it will also be previewed when moused over. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadImagePath": {"input": {"required": {"image": ["STRING", {"placeholder": "X://insert/path/here.png", "vhs_path_extensions": [".jpg", ".webp", ".tif", ".tiff", ".png", ".jpeg", ".ppm", ".pgm", ".bmp"]}], "custom_width": ["INT", {"default": 0, "min": 0, "max": 8192, "step": 8, "disable": 0}], "custom_height": ["INT", {"default": 0, "min": 0, "max": 8192, "step": 8, "disable": 0}]}, "optional": {"vae": ["VAE"]}, "hidden": {"force_size": "STRING"}}, "input_order": {"required": ["image", "custom_width", "custom_height"], "optional": ["vae"], "hidden": ["force_size"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "mask"], "name": "VHS_LoadImagePath", "display_name": "Load Image (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Image (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Load a single image from a given path
[-]
Inputs:
[-]
vae: (optional) If provided the node will output latents instead of images.
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
MASK: The alpha channel of the loaded images.
[-]
Widgets:
[-]
image: The image file to be loaded.
This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories
The current top-most completion may be selected with Tab
You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)
The filter on suggested file types can be disabled by pressing Ctrl+G.
If converted to an input, this functions as a string
[-]
force_size: Allows for conveniently scaling the input without requiring an additional node. Provides options to maintain aspect ratio or conveniently target common training formats for Animate Diff
[-]
custom_width: Allows for an arbitrary width to be entered, cropping to maintain aspect ratio if both are set
[-]
custom_height: Allows for an arbitrary height to be entered, cropping to maintain aspect ratio if both are set
[-]
videopreview: Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadImages": {"input": {"required": {"directory": [["3d", ".ipynb_checkpoints"]]}, "optional": {"image_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "skip_first_images": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}], "meta_batch": ["VHS_BatchManager"]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["directory"], "optional": ["image_load_cap", "skip_first_images", "select_every_nth", "meta_batch"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "frame_count"], "name": "VHS_LoadImages", "display_name": "Load Images (Upload) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a sequence of images from a subdirectory of the input folder
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
MASK: The alpha channel of the loaded images.
[-]
frame_count: The number of images just returned
[-]
Widgets:
[-]
directory: The directory images will be loaded from. Filtered to process jpg, png, ppm, bmp, tif, and webp files
[-]
image_load_cap: The maximum number of images to load. If 0, all images are loaded.
[-]
skip_first_images: A number of images which are discarded before producing output.
[-]
choose folder to upload: An upload button is provided to upload a local folder containing images to the input folder
[-]
videopreview: Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadImagesPath": {"input": {"required": {"directory": ["STRING", {"placeholder": "X://path/to/images", "vhs_path_extensions": []}]}, "optional": {"image_load_cap": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "skip_first_images": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}], "meta_batch": ["VHS_BatchManager"]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["directory"], "optional": ["image_load_cap", "skip_first_images", "select_every_nth", "meta_batch"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "MASK", "frame_count"], "name": "VHS_LoadImagesPath", "display_name": "Load Images (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Images (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads a sequence of images from an arbitrary path
[-]
Inputs:
[-]
meta_batch: (optional) Connect to a Meta Batch manager to divide extremely long sequences into sub batches. See the documentation for Meta Batch Manager
[-]
Outputs:
[-]
IMAGE: The loaded images
[-]
MASK: The alpha channel of the loaded images.
[-]
frame_count: The number of images just returned
[-]
Widgets:
[-]
directory: The directory images will be loaded from. Filtered to process jpg, png, ppm, bmp, tif, and webp files
This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories
The current top-most completion may be selected with Tab
You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)
The filter on suggested file types can be disabled by pressing Ctrl+G.
If converted to an input, this functions as a string
[-]
image_load_cap: The maximum number of images to load. If 0, all images are loaded.
[-]
skip_first_images: A number of images which are discarded before producing output.
[-]
select_every_nth: Keeps only the first of every n frames and discards the rest.
[-]
videopreview: Displays a preview for the selected video input. Will only be shown if Advanced Previews is enabled. This preview will reflect the image_load_cap, skip_first_images, and select_every_nth values chosen. Additional preview options can be accessed with right click.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadAudio": {"input": {"required": {"audio_file": ["STRING", {"default": "input/", "vhs_path_extensions": ["wav", "mp3", "ogg", "m4a", "flac"]}]}, "optional": {"seek_seconds": ["FLOAT", {"default": 0, "min": 0, "widgetType": "VHSTIMESTAMP"}], "duration": ["FLOAT", {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}]}}, "input_order": {"required": ["audio_file"], "optional": ["seek_seconds", "duration"]}, "is_input_list": false, "output": ["AUDIO", "FLOAT"], "output_is_list": [false, false], "output_name": ["audio", "duration"], "name": "VHS_LoadAudio", "display_name": "Load Audio (Path)\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Audio (Path) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads an audio file from an arbitrary path
[-]
Outputs:
[-]
audio: The loaded audio
[-]
Widgets:
[-]
audio_file: The audio file to be loaded.
This is a VHS_PATH input. When edited, it provides a list of possible valid files or directories
The current top-most completion may be selected with Tab
You can navigate up a directory by pressing Ctrl+B (or Ctrl+W if supported by browser)
The filter on suggested file types can be disabled by pressing Ctrl+G.
If converted to an input, this functions as a string
[-]
seek_seconds: An offset from the start of the sound file that the audio should start from
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_LoadAudioUpload": {"input": {"required": {"audio": [[]]}, "optional": {"start_time": ["FLOAT", {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}], "duration": ["FLOAT", {"default": 0, "min": 0, "max": 10000000, "step": 0.01, "widgetType": "VHSTIMESTAMP"}]}}, "input_order": {"required": ["audio"], "optional": ["start_time", "duration"]}, "is_input_list": false, "output": ["AUDIO", "FLOAT"], "output_is_list": [false, false], "output_name": ["audio", "duration"], "name": "VHS_LoadAudioUpload", "display_name": "Load Audio (Upload)\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Load Audio (Upload) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Loads an audio file from the input directory
Very similar in functionality to the built-in LoadAudio. It was originally added before VHS swapped to use Comfy's internal AUDIO format, but provides the additional options for start time and duration
[-]
Outputs:
[-]
audio: The loaded audio
[-]
Widgets:
[-]
audio: The audio file to be loaded.
[-]
start_time: An offset from the start of the sound file that the audio should start from
[-]
duration: A maximum limit for the audio. Disabled if 0
[-]
choose audio to upload: An upload button is provided to upload an audio file to the input folder
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_AudioToVHSAudio": {"input": {"required": {"audio": ["AUDIO"]}}, "input_order": {"required": ["audio"]}, "is_input_list": false, "output": ["VHS_AUDIO"], "output_is_list": [false], "output_name": ["vhs_audio"], "name": "VHS_AudioToVHSAudio", "display_name": "Audio to legacy VHS_AUDIO\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Audio to legacy VHS_AUDIO \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
utility function for compatibility with external nodes
VHS used to use an internal VHS_AUDIO format for routing audio between inputs and outputs. This format was intended to only be used internally and was designed with a focus on performance over ease of use. Since ComfyUI now has an internal AUDIO format, VHS now uses this format. However, some custom node packs were made that are external to both ComfyUI and VHS that use VHS_AUDIO. This node was added so that those external nodes can still function
[-]
Inputs:
[-]
audio: An input in the standardized AUDIO format
[-]
Outputs:
[-]
vhs_audio: An output in the legacy VHS_AUDIO format for use with external nodes
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VHSAudioToAudio": {"input": {"required": {"vhs_audio": ["VHS_AUDIO"]}}, "input_order": {"required": ["vhs_audio"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["audio"], "name": "VHS_VHSAudioToAudio", "display_name": "Legacy VHS_AUDIO to Audio\ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Legacy VHS_AUDIO to Audio \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
utility function for compatibility with external nodes
VHS used to use an internal VHS_AUDIO format for routing audio between inputs and outputs. This format was intended to only be used internally and was designed with a focus on performance over ease of use. Since ComfyUI now has an internal AUDIO format, VHS now uses this format. However, some custom node packs were made that are external to both ComfyUI and VHS that use VHS_AUDIO. This node was added so that those external nodes can still function
[-]
Inputs:
[-]
vhs_audio: An input in the legacy VHS_AUDIO format produced by an external node
[-]
Outputs:
[-]
audio: An output in the standardized AUDIO format
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_PruneOutputs": {"input": {"required": {"filenames": ["VHS_FILENAMES"], "options": [["Intermediate", "Intermediate and Utility"]]}}, "input_order": {"required": ["filenames", "options"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "VHS_PruneOutputs", "display_name": "Prune Outputs \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Prune Outputs \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Automates deletion of undesired outputs from a Video Combine node.
Video Combine produces a number of file outputs in addition to the final output. Some of these, such as a video file without audio included, are implementation limitations and are not feasible to solve. As an alternative, the Prune Outputs node is added to automate the deletion of these file outputs if they are not desired
[-]
Inputs:
[-]
filenames: A connection from a Video Combine node to indicate which outputs should be pruned
[-]
Widgets:
[-]
options: Which files should be deleted
[-]
Intermediate: Delete any files that were required for intermediate processing but are not the final output, like the no-audio output file when audio is included
[-]
Intermediate and Utility: Delete all produced files that aren't the final output, including the first frame png
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "VHS_BatchManager": {"input": {"required": {"frames_per_batch": ["INT", {"default": 16, "min": 1, "max": 9007199254740991, "step": 1}]}, "hidden": {"prompt": "PROMPT", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["frames_per_batch"], "hidden": ["prompt", "unique_id"]}, "is_input_list": false, "output": ["VHS_BatchManager"], "output_is_list": [false], "output_name": ["meta_batch"], "name": "VHS_BatchManager", "display_name": "Meta Batch Manager \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Meta Batch Manager \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Split the processing of a very long video into sets of smaller Meta Batches
The Meta Batch Manager allows for extremely long input videos to be processed when all other methods for fitting the content in RAM fail. It does not affect VRAM usage.
It must be connected to at least one Input (a Load Video or Load Images) AND at least one Video Combine
It functions by holding both the inputs and outputs open between executions, and automatically requeues the workflow until one of the inputs is unable to provide additional images.
Because each sub execution only contains a subset of the total frames, each sub execution creates a hard window which temporal smoothing can not be applied across. This results in jumps in the output.
[-]
Outputs:
[-]
meta_batch: Add all connected nodes to this Meta Batch
[-]
Widgets:
[-]
frames_per_batch: How many frames to process for each sub execution. If loading as image, each frame will use about 50MB of RAM (not VRAM), and this can safely be set in the 100-1000 range, depending on available memory. When loading and combining from latent space (no blue image noodles exist), this value can be much higher, around the 2,000 to 20,000 range
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VideoInfo": {"input": {"required": {"video_info": ["VHS_VIDEOINFO"]}}, "input_order": {"required": ["video_info"]}, "is_input_list": false, "output": ["FLOAT", "INT", "FLOAT", "INT", "INT", "FLOAT", "INT", "FLOAT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false, false], "output_name": ["source_fps\ud83d\udfe8", "source_frame_count\ud83d\udfe8", "source_duration\ud83d\udfe8", "source_width\ud83d\udfe8", "source_height\ud83d\udfe8", "loaded_fps\ud83d\udfe6", "loaded_frame_count\ud83d\udfe6", "loaded_duration\ud83d\udfe6", "loaded_width\ud83d\udfe6", "loaded_height\ud83d\udfe6"], "name": "VHS_VideoInfo", "display_name": "Video Info \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Video Info \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Splits information on a video into numerous outputs
[-]
Inputs:
[-]
video_info: A connection to a Load Video node
[-]
Outputs:
[-]
source_fps\ud83d\udfe8: The frame rate of the video
[-]
source_frame_count\ud83d\udfe8: How many total frames the video contains before accounting for frame rate or select_every_nth
[-]
source_duration\ud83d\udfe8: The duration of the source video in seconds
[-]
source_width\ud83d\udfe8: The width
[-]
source_height\ud83d\udfe8: The height
[-]
loaded_fps\ud83d\udfe6: The frame rate after accounting for force_rate and select_every_nth. This output is of particular use as it can be connected to the converted frame_rate input of a Video Combine node to ensure audio remains synchronized.
[-]
loaded_frame_count\ud83d\udfe6: The number of frames returned by the current execution. Identical to the frame_count returned by the node itself
[-]
loaded_duration\ud83d\udfe6: The duration in seconds of returned images after accounting for frame_load_cap
[-]
loaded_width\ud83d\udfe6: The width of the video after scaling. These coordinates are in image space even if loading to latent space
[-]
loaded_height\ud83d\udfe6: The height of the video after scaling. These coordinates are in image space even if loading to latent space
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VideoInfoSource": {"input": {"required": {"video_info": ["VHS_VIDEOINFO"]}}, "input_order": {"required": ["video_info"]}, "is_input_list": false, "output": ["FLOAT", "INT", "FLOAT", "INT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["fps\ud83d\udfe8", "frame_count\ud83d\udfe8", "duration\ud83d\udfe8", "width\ud83d\udfe8", "height\ud83d\udfe8"], "name": "VHS_VideoInfoSource", "display_name": "Video Info (Source) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Video Info Source \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Splits information on a video into numerous outputs describing the file itself without accounting for load options
[-]
Inputs:
[-]
video_info: A connection to a Load Video node
[-]
Outputs:
[-]
source_fps\ud83d\udfe8: The frame rate of the video
[-]
source_frame_count\ud83d\udfe8: How many total frames the video contains before accounting for frame rate or select_every_nth
[-]
source_duration\ud83d\udfe8: The duration of the source video in seconds
[-]
source_width\ud83d\udfe8: The original width
[-]
source_height\ud83d\udfe8: The original height
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VideoInfoLoaded": {"input": {"required": {"video_info": ["VHS_VIDEOINFO"]}}, "input_order": {"required": ["video_info"]}, "is_input_list": false, "output": ["FLOAT", "INT", "FLOAT", "INT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["fps\ud83d\udfe6", "frame_count\ud83d\udfe6", "duration\ud83d\udfe6", "width\ud83d\udfe6", "height\ud83d\udfe6"], "name": "VHS_VideoInfoLoaded", "display_name": "Video Info (Loaded) \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Video Info Loaded \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Splits information on a video into numerous outputs describing the file itself after accounting for load options
[-]
Inputs:
[-]
video_info: A connection to a Load Video node
[-]
Outputs:
[-]
loaded_fps\ud83d\udfe6: The frame rate after accounting for force_rate and select_every_nth. This output is of particular use as it can be connected to the converted frame_rate input of a Video Combine node to ensure audio remains synchronized.
[-]
loaded_frame_count\ud83d\udfe6: The number of frames returned by the current execution. Identical to the frame_count returned by the node itself
[-]
loaded_duration\ud83d\udfe6: The duration in seconds of returned images after accounting for frame_load_cap
[-]
loaded_width\ud83d\udfe6: The width of the video after scaling. This is the dimension of the corresponding image even if loading as a latent directly
[-]
loaded_height\ud83d\udfe6: The height of the video after scaling. This is the dimension of the corresponding image even if loading as a latent directly
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectFilename": {"input": {"required": {"filenames": ["VHS_FILENAMES"], "index": ["INT", {"default": -1, "step": 1, "min": -1}]}}, "input_order": {"required": ["filenames", "index"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["Filename"], "name": "VHS_SelectFilename", "display_name": "Select Filename \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "VAE Select Filename \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Select a single filename from the VHS_FILENAMES output by a Video Combine and return it as a string
Take care when combining this node with Prune Outputs. The VHS_FILENAMES object is immutable and will always contain the full list of output files, but execution order is undefined behavior (currently, Prune Outputs will generally execute first) and SelectFilename may return a path to a file that no longer exists.
[-]
Inputs:
[-]
filenames: A VHS_FILENAMES from a Video Combine node
[-]
Outputs:
[-]
filename: A string representation of the full output path for the chosen file
[-]
Widgets:
[-]
index: The index of which file should be selected. The default, -1, chooses the most complete output
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VAEEncodeBatched": {"input": {"required": {"pixels": ["IMAGE"], "vae": ["VAE"], "per_batch": ["INT", {"default": 16, "min": 1}]}}, "input_order": {"required": ["pixels", "vae", "per_batch"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "VHS_VAEEncodeBatched", "display_name": "VAE Encode Batched \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "VAE Encode Batched \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Encode images as latents with a manually specified batch size.
Some people have run into VRAM issues when encoding or decoding large batches of images. As a workaround, this node lets you manually set a batch size when encoding images.
Unless these issues have been encountered, it is simpler to use the native VAE Encode or to encode directly from a Load Video
[-]
Inputs:
[-]
pixels: The images to be encoded.
[-]
vae: The VAE to use when encoding.
[-]
Outputs:
[-]
LATENT: The encoded latents.
[-]
Widgets:
[-]
per_batch: The maximum number of images to encode in each batch.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/batched nodes", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_VAEDecodeBatched": {"input": {"required": {"samples": ["LATENT"], "vae": ["VAE"], "per_batch": ["INT", {"default": 16, "min": 1}]}}, "input_order": {"required": ["samples", "vae", "per_batch"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "VHS_VAEDecodeBatched", "display_name": "VAE Decode Batched \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "VAE Decode Batched \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Decode latents to images with a manually specified batch size
Some people have run into VRAM issues when encoding or decoding large batches of images. As a workaround, this node lets you manually set a batch size when decoding latents.
Unless these issues have been encountered, it is simpler to use the native VAE Decode or to decode from a Video Combine directly
[-]
Inputs:
[-]
samples: The latents to be decoded.
[-]
vae: The VAE to use when decoding.
[-]
Outputs:
[-]
IMAGE: The decoded images.
[-]
Widgets:
[-]
per_batch: The maximum number of images to decode in each batch.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/batched nodes", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SplitLatents": {"input": {"required": {"latents": ["LATENT"], "split_index": ["INT", {"default": 0, "step": 1, "min": -9007199254740991, "max": 9007199254740991}]}}, "input_order": {"required": ["latents", "split_index"]}, "is_input_list": false, "output": ["LATENT", "INT", "LATENT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["LATENT_A", "A_count", "LATENT_B", "B_count"], "name": "VHS_SplitLatents", "display_name": "Split Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Split Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Split a set of latents into two groups
[-]
Inputs:
[-]
latents: The latents to be split.
[-]
Outputs:
[-]
LATENT_A: The first group of latents
[-]
A_count: The number of latents in group A. This will be equal to split_index unless the latents input has length less than split_index
[-]
LATENT_B: The second group of latents
[-]
B_count: The number of latents in group B
[-]
Widgets:
[-]
split_index: The index of the first latent that will be in the second output group.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SplitImages": {"input": {"required": {"images": ["IMAGE"], "split_index": ["INT", {"default": 0, "step": 1, "min": -9007199254740991, "max": 9007199254740991}]}}, "input_order": {"required": ["images", "split_index"]}, "is_input_list": false, "output": ["IMAGE", "INT", "IMAGE", "INT"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE_A", "A_count", "IMAGE_B", "B_count"], "name": "VHS_SplitImages", "display_name": "Split Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Split Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Split a set of images into two groups
[-]
Inputs:
[-]
images: The images to be split.
[-]
Outputs:
[-]
IMAGE_A: The first group of images
[-]
A_count: The number of images in group A. This will be equal to split_index unless the images input has length less than split_index
[-]
IMAGE_B: The second group of images
[-]
B_count: The number of images in group B
[-]
Widgets:
[-]
split_index: The index of the first image that will be in the second output group.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SplitMasks": {"input": {"required": {"mask": ["MASK"], "split_index": ["INT", {"default": 0, "step": 1, "min": -9007199254740991, "max": 9007199254740991}]}}, "input_order": {"required": ["mask", "split_index"]}, "is_input_list": false, "output": ["MASK", "INT", "MASK", "INT"], "output_is_list": [false, false, false, false], "output_name": ["MASK_A", "A_count", "MASK_B", "B_count"], "name": "VHS_SplitMasks", "display_name": "Split Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Split Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Split a set of masks into two groups
[-]
Inputs:
[-]
mask: The masks to be split.
[-]
Outputs:
[-]
MASK_A: The first group of masks
[-]
A_count: The number of masks in group A. This will be equal to split_index unless the mask input has length less than split_index
[-]
MASK_B: The second group of masks
[-]
B_count: The number of masks in group B
[-]
Widgets:
[-]
split_index: The index of the first mask that will be in the second output group.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_MergeLatents": {"input": {"required": {"latents_A": ["LATENT"], "latents_B": ["LATENT"], "merge_strategy": [["match A", "match B", "match smaller", "match larger"]], "scale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["latents_A", "latents_B", "merge_strategy", "scale_method", "crop"]}, "is_input_list": false, "output": ["LATENT", "INT"], "output_is_list": [false, false], "output_name": ["LATENT", "count"], "name": "VHS_MergeLatents", "display_name": "Merge Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Merge Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Combine two groups of latents into a single group of latents
[-]
Inputs:
[-]
latents_A: The first group of latents
[-]
latents_B: The second group of latents
[-]
Outputs:
[-]
LATENT: The combined group of latents
[-]
count: The length of the combined group
[-]
Widgets:
[-]
merge_strategy: Determines what the output resolution will be if input resolutions don't match
[-]
match A: Always use the resolution for A
[-]
match B: Always use the resolution for B
[-]
match smaller: Pick the smaller resolution by area
[-]
match larger: Pick the larger resolution by area
[-]
scale_method: Determines what method to use if scaling is required
[-]
crop: When sizes don't match, should the resized image have its aspect ratio changed, or be cropped to maintain aspect ratio
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_MergeImages": {"input": {"required": {"images_A": ["IMAGE"], "images_B": ["IMAGE"], "merge_strategy": [["match A", "match B", "match smaller", "match larger"]], "scale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["images_A", "images_B", "merge_strategy", "scale_method", "crop"]}, "is_input_list": false, "output": ["IMAGE", "INT"], "output_is_list": [false, false], "output_name": ["IMAGE", "count"], "name": "VHS_MergeImages", "display_name": "Merge Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Merge Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Combine two groups of images into a single group of images
[-]
Inputs:
[-]
images_A: The first group of images
[-]
images_B: The second group of images
[-]
Outputs:
[-]
IMAGE: The combined group of images
[-]
count: The length of the combined group
[-]
Widgets:
[-]
merge_strategy: Determines what the output resolution will be if input resolutions don't match
[-]
match A: Always use the resolution for A
[-]
match B: Always use the resolution for B
[-]
match smaller: Pick the smaller resolution by area
[-]
match larger: Pick the larger resolution by area
[-]
scale_method: Determines what method to use if scaling is required
[-]
crop: When sizes don't match, should the resized image have its aspect ratio changed, or be cropped to maintain aspect ratio
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_MergeMasks": {"input": {"required": {"mask_A": ["MASK"], "mask_B": ["MASK"], "merge_strategy": [["match A", "match B", "match smaller", "match larger"]], "scale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["mask_A", "mask_B", "merge_strategy", "scale_method", "crop"]}, "is_input_list": false, "output": ["MASK", "INT"], "output_is_list": [false, false], "output_name": ["MASK", "count"], "name": "VHS_MergeMasks", "display_name": "Merge Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Merge Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Combine two groups of masks into a single group of masks
[-]
Inputs:
[-]
mask_A: The first group of masks
[-]
mask_B: The second group of masks
[-]
Outputs:
[-]
MASK: The combined group of masks
[-]
count: The length of the combined group
[-]
Widgets:
[-]
merge_strategy: Determines what the output resolution will be if input resolutions don't match
[-]
match A: Always use the resolution for A
[-]
match B: Always use the resolution for B
[-]
match smaller: Pick the smaller resolution by area
[-]
match larger: Pick the larger resolution by area
[-]
scale_method: Determines what method to use if scaling is required
[-]
crop: When sizes don't match, should the resized image have its aspect ratio changed, or be cropped to maintain aspect ratio
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_GetLatentCount": {"input": {"required": {"latents": ["LATENT"]}}, "input_order": {"required": ["latents"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["count"], "name": "VHS_GetLatentCount", "display_name": "Get Latent Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Get Latent Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Return the number of latents in an input as an INT
[-]
Inputs:
[-]
latents: The input latent
[-]
Outputs:
[-]
count: The number of latents in the input
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_GetImageCount": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["count"], "name": "VHS_GetImageCount", "display_name": "Get Image Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Get Image Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Return the number of images in an input as an INT
[-]
Inputs:
[-]
images: The input image
[-]
Outputs:
[-]
count: The number of images in the input
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_GetMaskCount": {"input": {"required": {"mask": ["MASK"]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["count"], "name": "VHS_GetMaskCount", "display_name": "Get Mask Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Get Mask Count \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Return the number of masks in an input as an INT
[-]
Inputs:
[-]
mask: The input mask
[-]
Outputs:
[-]
count: The number of masks in the input
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_DuplicateLatents": {"input": {"required": {"latents": ["LATENT"], "multiply_by": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["latents", "multiply_by"]}, "is_input_list": false, "output": ["LATENT", "INT"], "output_is_list": [false, false], "output_name": ["LATENT", "count"], "name": "VHS_DuplicateLatents", "display_name": "Repeat Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Repeat Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Append copies of a latent to itself so it repeats
[-]
Inputs:
[-]
latents: The latents to be repeated
[-]
Outputs:
[-]
LATENT: The latent with repeats
[-]
count: The number of latents in the output. Equal to the length of the input latent * multiply_by
[-]
Widgets:
[-]
multiply_by: Controls the number of times the latent should repeat. 1, the default, means no change.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_DuplicateImages": {"input": {"required": {"images": ["IMAGE"], "multiply_by": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["images", "multiply_by"]}, "is_input_list": false, "output": ["IMAGE", "INT"], "output_is_list": [false, false], "output_name": ["IMAGE", "count"], "name": "VHS_DuplicateImages", "display_name": "Repeat Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Repeat Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Append copies of an image to itself so it repeats
[-]
Inputs:
[-]
images: The images to be repeated
[-]
Outputs:
[-]
IMAGE: The image with repeats
[-]
count: The number of images in the output. Equal to the length of the input image * multiply_by
[-]
Widgets:
[-]
multiply_by: Controls the number of times the image should repeat. 1, the default, means no change.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_DuplicateMasks": {"input": {"required": {"mask": ["MASK"], "multiply_by": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["mask", "multiply_by"]}, "is_input_list": false, "output": ["MASK", "INT"], "output_is_list": [false, false], "output_name": ["MASK", "count"], "name": "VHS_DuplicateMasks", "display_name": "Repeat Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Repeat Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Append copies of a mask to itself so it repeats
[-]
Inputs:
[-]
mask: The masks to be repeated
[-]
Outputs:
[-]
MASK: The mask with repeats
[-]
count: The number of masks in the output. Equal to the length of the input masks * multiply_by
[-]
Widgets:
[-]
multiply_by: Controls the number of times the mask should repeat. 1, the default, means no change.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectEveryNthLatent": {"input": {"required": {"latents": ["LATENT"], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}], "skip_first_latents": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["latents", "select_every_nth", "skip_first_latents"]}, "is_input_list": false, "output": ["LATENT", "INT"], "output_is_list": [false, false], "output_name": ["LATENT", "count"], "name": "VHS_SelectEveryNthLatent", "display_name": "Select Every Nth Latent \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Select Every Nth Latent \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Keep only 1 latent for every interval
[-]
Inputs:
[-]
latents: The input latent
[-]
Outputs:
[-]
LATENT: The output latents
[-]
count: The number of latents in the input
[-]
Widgets:
[-]
select_every_nth: The interval from which one frame is kept. 1 means no frames are skipped.
[-]
skip_first_latents: A number of frames which are skipped from the start. This applies before select_every_nth. As a result, multiple copies of the node can each have a different skip_first_latents to divide the latent into groups
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectEveryNthImage": {"input": {"required": {"images": ["IMAGE"], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}], "skip_first_images": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["images", "select_every_nth", "skip_first_images"]}, "is_input_list": false, "output": ["IMAGE", "INT"], "output_is_list": [false, false], "output_name": ["IMAGE", "count"], "name": "VHS_SelectEveryNthImage", "display_name": "Select Every Nth Image \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Select Every Nth Image \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Keep only 1 image for every interval
[-]
Inputs:
[-]
images: The input image
[-]
Outputs:
[-]
IMAGE: The output images
[-]
count: The number of images in the input
[-]
Widgets:
[-]
select_every_nth: The interval from which one frame is kept. 1 means no frames are skipped.
[-]
skip_first_images: A number of frames which are skipped from the start. This applies before select_every_nth. As a result, multiple copies of the node can each have a different skip_first_images to divide the image into groups
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectEveryNthMask": {"input": {"required": {"mask": ["MASK"], "select_every_nth": ["INT", {"default": 1, "min": 1, "max": 9007199254740991, "step": 1}], "skip_first_masks": ["INT", {"default": 0, "min": 0, "max": 9007199254740991, "step": 1}]}}, "input_order": {"required": ["mask", "select_every_nth", "skip_first_masks"]}, "is_input_list": false, "output": ["MASK", "INT"], "output_is_list": [false, false], "output_name": ["MASK", "count"], "name": "VHS_SelectEveryNthMask", "display_name": "Select Every Nth Mask \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Select Every Nth Mask \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Keep only 1 mask for every interval
[-]
Inputs:
[-]
mask: The input mask
[-]
Outputs:
[-]
MASK: The output mask
[-]
count: The number of masks in the input
[-]
Widgets:
[-]
select_every_nth: The interval from which one frame is kept. 1 means no frames are skipped.
[-]
skip_first_masks: A number of frames which are skipped from the start. This applies before select_every_nth. As a result, multiple copies of the node can each have a different skip_first_masks to divide the mask into groups
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectLatents": {"input": {"required": {"latent": ["LATENT"], "indexes": ["STRING", {"default": "0"}], "err_if_missing": ["BOOLEAN", {"default": true}], "err_if_empty": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["latent", "indexes", "err_if_missing", "err_if_empty"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "VHS_SelectLatents", "display_name": "Select Latents \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Use comma-separated indexes to select items in the given order.\nSupports negative indexes, python-style ranges (end index excluded),\nas well as range step.\n\nAcceptable entries (assuming 16 items provided, so idxs 0 to 15 exist):\n0 -> Returns [0]\n-1 -> Returns [15]\n0, 1, 13 -> Returns [0, 1, 13]\n0:5, 13 -> Returns [0, 1, 2, 3, 4, 13]\n0:-1 -> Returns [0, 1, 2, ..., 13, 14]\n0:5:-1 -> Returns [4, 3, 2, 1, 0]\n0:5:2 -> Returns [0, 2, 4]\n::-1 -> Returns [15, 14, 13, ..., 2, 1, 0]\n", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/latent", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectImages": {"input": {"required": {"image": ["IMAGE"], "indexes": ["STRING", {"default": "0"}], "err_if_missing": ["BOOLEAN", {"default": true}], "err_if_empty": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image", "indexes", "err_if_missing", "err_if_empty"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "VHS_SelectImages", "display_name": "Select Images \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Use 
comma-separated indexes to select items in the given order.\nSupports negative indexes, python-style ranges (end index excluded),\nas well as range step.\n\nAcceptable entries (assuming 16 items provided, so idxs 0 to 15 exist):\n0 -> Returns [0]\n-1 -> Returns [15]\n0, 1, 13 -> Returns [0, 1, 13]\n0:5, 13 -> Returns [0, 1, 2, 3, 4, 13]\n0:-1 -> Returns [0, 1, 2, ..., 13, 14]\n0:5:-1 -> Returns [4, 3, 2, 1, 0]\n0:5:2 -> Returns [0, 2, 4]\n::-1 -> Returns [15, 14, 13, ..., 2, 1, 0]\n", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectMasks": {"input": {"required": {"mask": ["MASK"], "indexes": ["STRING", {"default": "0"}], "err_if_missing": ["BOOLEAN", {"default": true}], "err_if_empty": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["mask", "indexes", "err_if_missing", "err_if_empty"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "VHS_SelectMasks", "display_name": "Select Masks \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Use comma-separated indexes to select items in the given order.\nSupports negative indexes, python-style ranges (end index excluded),\nas well as range step.\n\nAcceptable entries (assuming 16 items provided, so idxs 0 to 15 exist):\n0 -> Returns [0]\n-1 -> Returns [15]\n0, 1, 13 -> Returns [0, 1, 13]\n0:5, 13 -> Returns [0, 1, 2, 3, 4, 13]\n0:-1 -> Returns [0, 1, 2, ..., 13, 14]\n0:5:-1 -> Returns [4, 3, 2, 1, 0]\n0:5:2 -> Returns [0, 2, 4]\n::-1 -> Returns [15, 14, 13, ..., 2, 1, 0]\n", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62/mask", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_Unbatch": {"input": {"required": {"batched": ["*"]}}, 
"input_order": {"required": ["batched"]}, "is_input_list": true, "output": ["*"], "output_is_list": [false], "output_name": ["unbatched"], "name": "VHS_Unbatch", "display_name": "Unbatch \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Unbatch \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Unbatch a list of items into a single concatenated item
Useful for when you want a single video output from a complex workflow
Has no relation to the Meta Batch system of VHS
[-]
Inputs:
[-]
batched: Any input which may or may not be batched
[-]
Outputs:
[-]
unbatched: A single output element. Torch tensors are concatenated across dim 0, all other types are added which functions as concatenation for strings and arrays, but may give undesired results for other types
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VHS_SelectLatest": {"input": {"required": {"filename_prefix": ["STRING", {"default": "output/AnimateDiff", "vhs_path_extensions": []}], "filename_postfix": ["STRING", {"placeholder": ".webm"}]}}, "input_order": {"required": ["filename_prefix", "filename_postfix"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["Filename"], "name": "VHS_SelectLatest", "display_name": "Select Latest \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "description": "Select Latest \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62
Experimental virtual node to select the most recently modified file from a given folder
Assists in the creation of workflows where outputs from one execution are used elsewhere in subsequent executions.
[-]
Inputs:
[-]
filename_prefix: A path which can consist of a combination of folders and a prefix which candidate files must match
[-]
filename_postfix: A string which the selected file must end with. Useful for limiting to a target extension.
[-]
Outputs:
[-]
Filename: A string representing a file path to the most recently modified file.
", "python_module": "custom_nodes.ComfyUI-VideoHelperSuite", "category": "Video Helper Suite \ud83c\udfa5\ud83c\udd65\ud83c\udd57\ud83c\udd62", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CropWithPadInfo": {"input": {"required": {"image": ["IMAGE"], "pad_info": ["ANY"]}}, "input_order": {"required": ["image", "pad_info"]}, "is_input_list": false, "output": ["IMAGE", "FLOAT"], "output_is_list": [false, false], "output_name": ["cropped_image", "scale_by"], "name": "CropWithPadInfo", "display_name": "Crop With Pad Info", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextEncodeQwenImageEdit_lrzjason": {"input": {"required": {"clip": ["CLIP"], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE"], "image": ["IMAGE"], "enable_resize": ["BOOLEAN", {"default": true}], "resolution": [[2048, 1536, 1328, 1024, 768, 512], {"default": 1024}]}}, "input_order": {"required": ["clip", "prompt"], "optional": ["vae", "image", "enable_resize", "resolution"]}, "is_input_list": false, "output": ["CONDITIONING", "IMAGE", "LATENT"], "output_is_list": [false, false, false], "output_name": ["CONDITIONING", "IMAGE", "LATENT"], "name": "TextEncodeQwenImageEdit_lrzjason", "display_name": "TextEncodeQwenImageEdit lrzjason", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextEncodeQwenImageEditPlus_lrzjason": {"input": {"required": {"clip": ["CLIP"], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE"], "image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"], "image4": ["IMAGE"], "image5": ["IMAGE"], "enable_resize": ["BOOLEAN", {"default": true}], "enable_vl_resize": ["BOOLEAN", 
{"default": true}], "skip_first_image_resize": ["BOOLEAN", {"default": false}], "upscale_method": [["lanczos", "bicubic", "area"]], "crop": [["disabled", "center"]], "instruction": ["STRING", {"multiline": true, "default": "Describe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate."}]}}, "input_order": {"required": ["clip", "prompt"], "optional": ["vae", "image1", "image2", "image3", "image4", "image5", "enable_resize", "enable_vl_resize", "skip_first_image_resize", "upscale_method", "crop", "instruction"]}, "is_input_list": false, "output": ["CONDITIONING", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "LATENT"], "output_is_list": [false, false, false, false, false, false, false], "output_name": ["conditioning", "image1", "image2", "image3", "image4", "image5", "latent"], "name": "TextEncodeQwenImageEditPlus_lrzjason", "display_name": "TextEncodeQwenImageEditPlus lrzjason", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextEncodeQwenImageEditPlusAdvance_lrzjason": {"input": {"required": {"clip": ["CLIP"], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE"], "vl_resize_image1": ["IMAGE"], "vl_resize_image2": ["IMAGE"], "vl_resize_image3": ["IMAGE"], "not_resize_image1": ["IMAGE"], "not_resize_image2": ["IMAGE"], "not_resize_image3": ["IMAGE"], "target_size": [[1024, 1344, 1536, 2048, 768, 512], {"default": 1024}], "target_vl_size": [[392, 384], {"default": 384}], "upscale_method": [["lanczos", "bicubic", "area"]], "crop_method": [["pad", "center", "disabled"]], "instruction": ["STRING", {"multiline": true, "default": "Describe the key 
features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate."}]}}, "input_order": {"required": ["clip", "prompt"], "optional": ["vae", "vl_resize_image1", "vl_resize_image2", "vl_resize_image3", "not_resize_image1", "not_resize_image2", "not_resize_image3", "target_size", "target_vl_size", "upscale_method", "crop_method", "instruction"]}, "is_input_list": false, "output": ["CONDITIONING", "LATENT", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "CONDITIONING", "ANY"], "output_is_list": [false, false, false, false, false, false, false, false, false, false], "output_name": ["conditioning_with_full_ref", "latent", "target_image1", "target_image2", "target_image3", "vl_resized_image1", "vl_resized_image2", "vl_resized_image3", "conditioning_with_first_ref", "pad_info"], "name": "TextEncodeQwenImageEditPlusAdvance_lrzjason", "display_name": "TextEncodeQwenImageEditPlusAdvance lrzjason", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextEncodeQwenImageEditPlusPro_lrzjason": {"input": {"required": {"clip": ["CLIP"], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"vae": ["VAE"], "image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"], "image4": ["IMAGE"], "image5": ["IMAGE"], "vl_resize_indexs": ["STRING", {"default": "1,2,3"}], "main_image_index": ["INT", {"default": 1, "max": 5, "min": 1}], "target_size": [[1024, 1344, 1536, 2048, 768, 512], {"default": 1024}], "target_vl_size": [[392, 384], {"default": 384}], "upscale_method": [["lanczos", "bicubic", "area"]], "crop_method": [["pad", "center", "disabled"]], "instruction": ["STRING", {"multiline": true, 
"default": "Describe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate."}]}}, "input_order": {"required": ["clip", "prompt"], "optional": ["vae", "image1", "image2", "image3", "image4", "image5", "vl_resize_indexs", "main_image_index", "target_size", "target_vl_size", "upscale_method", "crop_method", "instruction"]}, "is_input_list": false, "output": ["CONDITIONING", "LATENT", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", "CONDITIONING", "ANY"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["conditioning_with_full_ref", "latent", "image1", "image2", "image3", "image4", "image5", "conditioning_with_main_ref", "pad_info"], "name": "TextEncodeQwenImageEditPlusPro_lrzjason", "display_name": "TextEncodeQwenImageEditPlusPro lrzjason", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextEncodeQwenImageEditPlusCustom_lrzjason": {"input": {"required": {"clip": ["CLIP"], "vae": ["VAE"], "prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}], "configs": ["LIST", {"default": null}]}, "optional": {"return_full_refs_cond": ["BOOLEAN", {"default": true}], "instruction": ["STRING", {"multiline": true, "default": "Describe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate."}]}}, "input_order": {"required": ["clip", "vae", "prompt", "configs"], "optional": ["return_full_refs_cond", "instruction"]}, "is_input_list": false, "output": ["CONDITIONING", "LATENT", "ANY"], "output_is_list": [false, false, false], "output_name": ["conditioning", "latent", "custom_output"], "name": "TextEncodeQwenImageEditPlusCustom_lrzjason", "display_name": "TextEncodeQwenImageEditPlusCustom lrzjason", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditOutputExtractor": {"input": {"required": {"custom_output": ["ANY"]}}, "input_order": {"required": ["custom_output"]}, "is_input_list": false, "output": ["ANY", "CONDITIONING", "CONDITIONING", "IMAGE", "LIST", "LIST", "LIST", "STRING", "STRING", "CONDITIONING", "MASK"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false], "output_name": ["pad_info", "full_refs_cond", "main_ref_cond", "main_image", "vae_images", "ref_latents", "vl_images", "full_prompt", "llama_template", "no_refs_cond", "mask"], "name": "QwenEditOutputExtractor", "display_name": "Qwen Edit Output Extractor", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditConfigPreparer": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"configs": ["LIST", {"default": null, "tooltip": "Configs list"}], "to_ref": ["BOOLEAN", {"default": true, "tooltip": "Add image to reference latent"}], "ref_main_image": ["BOOLEAN", {"default": true, "tooltip": "Set image as main image which would return the latent as output."}], "ref_longest_edge": ["INT", {"default": 1024, "min": 64, "max": 4096, "step": 
1, "tooltip": "Longest edge of the output latent"}], "ref_crop": [["pad", "center", "disabled"], {"default": "pad", "tooltip": "Crop method for reference image"}], "ref_upscale": [["lanczos", "bicubic", "area"], {"default": "lanczos", "tooltip": "Upscale method for reference image"}], "to_vl": ["BOOLEAN", {"default": true, "tooltip": "Add image to qwenvl 2.5 encode"}], "vl_resize": ["BOOLEAN", {"default": true, "tooltip": "Resize image before qwenvl 2.5 encode"}], "vl_target_size": ["INT", {"default": 384, "min": 384, "max": 2048, "tooltip": "Target size of the qwenvl 2.5 encode"}], "vl_crop": [["center", "disabled"], {"default": "center", "tooltip": "Crop method for reference image"}], "vl_upscale": [["lanczos", "bicubic", "area"], {"default": "lanczos", "tooltip": "Upscale method for reference image"}], "mask": ["MASK"]}}, "input_order": {"required": ["image"], "optional": ["configs", "to_ref", "ref_main_image", "ref_longest_edge", "ref_crop", "ref_upscale", "to_vl", "vl_resize", "vl_target_size", "vl_crop", "vl_upscale", "mask"]}, "is_input_list": false, "output": ["LIST", "ANY"], "output_is_list": [false, false], "output_name": ["configs", "config"], "name": "QwenEditConfigPreparer", "display_name": "Qwen Edit Config Preparer", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditConfigJsonParser": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"configs": ["LIST", {"default": null, "tooltip": "Configs list"}], "config_json": ["STRING", {"default": "{\n \"to_ref\": true,\n \"ref_main_image\": false,\n \"ref_longest_edge\": 1024,\n \"ref_crop\": \"center\",\n \"ref_upscale\": \"lanczos\",\n \"to_vl\": true,\n \"vl_resize\": true,\n \"vl_target_size\": 384,\n \"vl_crop\": \"center\",\n \"vl_upscale\": \"bicubic\"\n}", "multiline": true, "tooltip": "Config JSON String"}], "mask": ["MASK"]}}, "input_order": 
{"required": ["image"], "optional": ["configs", "config_json", "mask"]}, "is_input_list": false, "output": ["LIST", "ANY"], "output_is_list": [false, false], "output_name": ["configs", "config"], "name": "QwenEditConfigJsonParser", "display_name": "Qwen Edit Config Json Parser", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditListExtractor": {"input": {"required": {"items": ["LIST"], "index": ["INT", {"default": 0, "min": 0, "max": 1000000, "step": 1, "tooltip": "Index of the image"}]}}, "input_order": {"required": ["items", "index"]}, "is_input_list": false, "output": ["ANY"], "output_is_list": [false], "output_name": ["item"], "name": "QwenEditListExtractor", "display_name": "Qwen Edit List Extractor", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditAny2Image": {"input": {"required": {"item": ["ANY"]}}, "input_order": {"required": ["item"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["item"], "name": "QwenEditAny2Image", "display_name": "Qwen Edit Any2Image", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QwenEditAny2Latent": {"input": {"required": {"item": ["ANY"]}}, "input_order": {"required": ["item"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["item"], "name": "QwenEditAny2Latent", "display_name": "Qwen Edit Any2Latent", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, 
"QwenEditAdaptiveLongestEdge": {"input": {"required": {"image": ["IMAGE"], "max_size": ["INT", {"default": 2048, "min": 512, "max": 4096, "step": 1, "tooltip": "When image is larger than max_size, it will be resized to under the max_size."}]}}, "input_order": {"required": ["image", "max_size"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["longest_edge"], "name": "QwenEditAdaptiveLongestEdge", "display_name": "Qwen Edit Adaptive Longest Edge", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "advanced/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoadImageReturnFilename": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "STRING"], "output_is_list": [false, false, false], "output_name": ["image", "mask", "filename"], "name": "LoadImageReturnFilename", "display_name": "Load Image Return Filename", "description": "", "python_module": "custom_nodes.Comfyui-QwenEditUtils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LanPaint_KSampler": {"input": {"required": {"model": ["MODEL", {"tooltip": "The model used for denoising the input latent."}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "The random seed used for creating the noise."}], "steps": ["INT", {"default": 30, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process."}], "cfg": ["FLOAT", {"default": 5.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01, "tooltip": "The 
Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."}], "sampler_name": [["euler", "euler_ancestral", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral", "dpm_fast", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "deis", "res_multistep", "res_multistep_ancestral", "gradient_estimation", "er_sde", "seeds_2", "seeds_3"], {"tooltip": "Recommended: euler."}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"default": "karras", "tooltip": "The scheduler controls how noise is gradually removed to form the image."}], "positive": ["CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to include in the image."}], "negative": ["CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to exclude from the image."}], "latent_image": ["LATENT", {"tooltip": "The latent image to denoise."}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."}], "LanPaint_NumSteps": ["INT", {"default": 5, "min": 0, "max": 100, "tooltip": "The number of steps for the Langevin dynamics, representing the turns of thinking per step."}], "LanPaint_PromptMode": [["Image First", "Prompt First"], {"tooltip": "Image First: emphasis image quality, Prompt First: emphasis prompt following"}], "LanPaint_Info": ["STRING", {"default": "LanPaint KSampler. For more info, visit https://github.com/scraed/LanPaint. 
If you find it useful, please give a star \u2b50\ufe0f!", "multiline": true}], "Inpainting_mode": [["\ud83d\uddbc\ufe0f Image Inpainting", "\ud83c\udfac Video Inpainting"], {"default": "\ud83d\uddbc\ufe0f Image Inpainting", "tooltip": "Choose Image mode for photos or Video mode for video frames with temporal consistency"}]}}, "input_order": {"required": ["model", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "latent_image", "denoise", "LanPaint_NumSteps", "LanPaint_PromptMode", "LanPaint_Info", "Inpainting_mode"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LanPaint_KSampler", "display_name": "LanPaint KSampler", "description": "Uses the provided model, positive and negative conditioning to denoise the latent image.", "python_module": "custom_nodes.LanPaint", "category": "sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The denoised latent."], "search_aliases": []}, "LanPaint_KSamplerAdvanced": {"input": {"required": {"model": ["MODEL"], "add_noise": [["enable", "disable"]], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 30, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 5.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "sampler_name": [["euler", "euler_ancestral", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral", "dpm_fast", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "deis", "res_multistep", "res_multistep_ancestral", "gradient_estimation", "er_sde", "seeds_2", "seeds_3"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent_image": ["LATENT"], "start_at_step": ["INT", {"default": 0, "min": 0, "max": 10000}], "end_at_step": 
["INT", {"default": 10000, "min": 0, "max": 10000}], "return_with_leftover_noise": [["disable", "enable"]], "LanPaint_NumSteps": ["INT", {"default": 5, "min": 0, "max": 100, "tooltip": "The number of steps for the Langevin dynamics, representing the turns of thinking per step."}], "LanPaint_Lambda": ["FLOAT", {"default": 16.0, "min": 0.1, "max": 50.0, "step": 0.1, "round": 0.1, "tooltip": "The bidirectional guidance scale. Higher values align with known regions more closely, but may result in instability."}], "LanPaint_StepSize": ["FLOAT", {"default": 0.2, "min": 0.0001, "max": 1.0, "step": 0.01, "round": 0.001, "tooltip": "The step size for the Langevin dynamics. Higher values result in faster convergence but may be unstable."}], "LanPaint_Beta": ["FLOAT", {"default": 1.0, "min": 0.0001, "max": 5, "step": 0.1, "round": 0.1, "tooltip": "The step size ratio between masked / unmasked regions. Lower value can compensate high values of LanPaint_Lambda."}], "LanPaint_Friction": ["FLOAT", {"default": 15, "min": 0.0, "max": 50.0, "step": 0.1, "round": 0.1, "tooltip": "The friction parameter for fast langevin, lower values result in faster convergence but may be unstable."}], "LanPaint_PromptMode": [["Image First", "Prompt First"], {"tooltip": "Image First: emphasis image quality, Prompt First: emphasis prompt following"}], "LanPaint_EarlyStop": ["INT", {"default": 1, "min": 0, "max": 10000, "tooltip": "The number of steps to stop the LanPaint early, useful for preventing the image from irregular patterns."}], "LanPaint_Info": ["STRING", {"default": "LanPaint KSampler Adv. For more info, visit https://github.com/scraed/LanPaint. 
If you find it useful, please give a star \u2b50\ufe0f!", "multiline": true}], "Inpainting_mode": [["\ud83d\uddbc\ufe0f Image Inpainting", "\ud83c\udfac Video Inpainting"], {"default": "\ud83d\uddbc\ufe0f Image Inpainting", "tooltip": "Choose Image mode for photos or Video mode for video frames with temporal consistency"}], "LanPaint_InnerThreshold": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001, "round": 0.0001, "tooltip": "Early stop threshold for Langevin iterations based on semantic distance. 0.0 to disable. (Contributed by godnight10061)"}], "LanPaint_InnerPatience": ["INT", {"default": 1, "min": 1, "max": 100, "tooltip": "Number of consecutive steps below threshold required to stop. (Contributed by godnight10061)"}]}}, "input_order": {"required": ["model", "add_noise", "noise_seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "latent_image", "start_at_step", "end_at_step", "return_with_leftover_noise", "LanPaint_NumSteps", "LanPaint_Lambda", "LanPaint_StepSize", "LanPaint_Beta", "LanPaint_Friction", "LanPaint_PromptMode", "LanPaint_EarlyStop", "LanPaint_Info", "Inpainting_mode", "LanPaint_InnerThreshold", "LanPaint_InnerPatience"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LanPaint_KSamplerAdvanced", "display_name": "LanPaint KSampler (Advanced)", "description": "", "python_module": "custom_nodes.LanPaint", "category": "sampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LanPaint_SamplerCustom": {"input": {"required": {"model": ["MODEL"], "add_noise": ["BOOLEAN", {"default": true}], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "sampler": ["SAMPLER"], "sigmas": ["SIGMAS"], "latent_image": ["LATENT"], 
"LanPaint_NumSteps": ["INT", {"default": 5, "min": 0, "max": 100, "tooltip": "Number of steps for Langevin dynamics, representing turns of thinking per step."}], "LanPaint_PromptMode": [["Image First", "Prompt First"], {"tooltip": "Image First: prioritizes image quality; Prompt First: prioritizes prompt adherence."}], "LanPaint_Info": ["STRING", {"default": "LanPaint Custom Sampler. For more info, visit https://github.com/scraed/LanPaint. If you find it useful, please give a star \u2b50\ufe0f!", "multiline": true}]}}, "input_order": {"required": ["model", "add_noise", "noise_seed", "cfg", "positive", "negative", "sampler", "sigmas", "latent_image", "LanPaint_NumSteps", "LanPaint_PromptMode", "LanPaint_Info"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["output", "denoised_output"], "name": "LanPaint_SamplerCustom", "display_name": "LanPaint Sampler Custom", "description": "", "python_module": "custom_nodes.LanPaint", "category": "sampling/custom_sampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LanPaint_SamplerCustomAdvanced": {"input": {"required": {"noise": ["NOISE"], "guider": ["GUIDER"], "sampler": ["SAMPLER"], "sigmas": ["SIGMAS"], "latent_image": ["LATENT"], "LanPaint_NumSteps": ["INT", {"default": 5, "min": 0, "max": 100, "tooltip": "Number of steps for Langevin dynamics, representing turns of thinking per step."}], "LanPaint_Lambda": ["FLOAT", {"default": 16.0, "min": 0.1, "max": 50.0, "step": 0.1, "tooltip": "Bidirectional guidance scale. Higher values align with known regions but may cause instability."}], "LanPaint_StepSize": ["FLOAT", {"default": 0.2, "min": 0.0001, "max": 1.0, "step": 0.01, "tooltip": "Step size for Langevin dynamics. Higher values speed convergence but may be unstable."}], "LanPaint_Beta": ["FLOAT", {"default": 1.0, "min": 0.0001, "max": 5.0, "step": 0.1, "tooltip": "Step size ratio between masked/unmasked regions. 
Lower values balance high Lambda."}], "LanPaint_Friction": ["FLOAT", {"default": 15.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Friction parameter for fast Langevin. Lower values speed convergence but may be unstable."}], "LanPaint_PromptMode": [["Image First", "Prompt First"], {"tooltip": "Image First: prioritizes image quality; Prompt First: prioritizes prompt adherence."}], "LanPaint_EarlyStop": ["INT", {"default": 1, "min": 0, "max": 10000, "tooltip": "Steps to stop LanPaint early, preventing irregular patterns."}], "LanPaint_Info": ["STRING", {"default": "LanPaint Custom Sampler Adv. For more info, visit https://github.com/scraed/LanPaint. If you find it useful, please give a star \u2b50\ufe0f!", "multiline": true}], "LanPaint_InnerThreshold": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001, "round": 0.0001, "tooltip": "Early stop threshold for Langevin iterations based on semantic distance. 0.0 to disable. (Contributed by godnight10061)"}], "LanPaint_InnerPatience": ["INT", {"default": 1, "min": 1, "max": 100, "tooltip": "Number of consecutive steps below threshold required to stop. 
(Contributed by godnight10061)"}]}}, "input_order": {"required": ["noise", "guider", "sampler", "sigmas", "latent_image", "LanPaint_NumSteps", "LanPaint_Lambda", "LanPaint_StepSize", "LanPaint_Beta", "LanPaint_Friction", "LanPaint_PromptMode", "LanPaint_EarlyStop", "LanPaint_Info", "LanPaint_InnerThreshold", "LanPaint_InnerPatience"]}, "is_input_list": false, "output": ["LATENT", "LATENT"], "output_is_list": [false, false], "output_name": ["output", "denoised_output"], "name": "LanPaint_SamplerCustomAdvanced", "display_name": "LanPaint Sampler Custom (Advanced)", "description": "", "python_module": "custom_nodes.LanPaint", "category": "sampling/custom_sampling", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LanPaint_MaskBlend": {"input": {"required": {"image1": ["IMAGE", {"tooltip": "Image before inpaint"}], "image2": ["IMAGE", {"tooltip": "Image after inpaint"}], "mask": ["MASK"], "blend_overlap": ["INT", {"default": 1, "min": 1, "max": 51, "step": 2, "tooltip": "The number of pixels to blend between the two images."}]}}, "input_order": {"required": ["image1", "image2", "mask", "blend_overlap"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "LanPaint_MaskBlend", "display_name": "LanPaint Mask Blend", "description": "", "python_module": "custom_nodes.LanPaint", "category": "image/postprocessing", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AdvancedLivePortrait": {"input": {"required": {"retargeting_eyes": ["FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.01}], "retargeting_mouth": ["FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.01}], "crop_factor": ["FLOAT", {"default": 2, "min": 1.5, "max": 3, "step": 0.1}], "turn_on": ["BOOLEAN", {"default": true}], "command": ["STRING", {"multiline": true, "default": ""}]}, "optional": {"src_images": ["IMAGE"], "motion_link": ["EDITOR_LINK"], "driving_images": ["IMAGE"]}}, "input_order": 
{"required": ["retargeting_eyes", "retargeting_mouth", "crop_factor", "turn_on", "command"], "optional": ["src_images", "motion_link", "driving_images"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "AdvancedLivePortrait", "display_name": "Advanced Live Portrait (PHM)", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ExpressionEditor": {"input": {"required": {"rotate_pitch": ["FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": "number"}], "rotate_yaw": ["FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": "number"}], "rotate_roll": ["FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": "number"}], "blink": ["FLOAT", {"default": 0, "min": -20, "max": 5, "step": 0.5, "display": "number"}], "eyebrow": ["FLOAT", {"default": 0, "min": -10, "max": 15, "step": 0.5, "display": "number"}], "wink": ["FLOAT", {"default": 0, "min": 0, "max": 25, "step": 0.5, "display": "number"}], "pupil_x": ["FLOAT", {"default": 0, "min": -15, "max": 15, "step": 0.5, "display": "number"}], "pupil_y": ["FLOAT", {"default": 0, "min": -15, "max": 15, "step": 0.5, "display": "number"}], "aaa": ["FLOAT", {"default": 0, "min": -30, "max": 120, "step": 1, "display": "number"}], "eee": ["FLOAT", {"default": 0, "min": -20, "max": 15, "step": 0.2, "display": "number"}], "woo": ["FLOAT", {"default": 0, "min": -20, "max": 15, "step": 0.2, "display": "number"}], "smile": ["FLOAT", {"default": 0, "min": -0.3, "max": 1.3, "step": 0.01, "display": "number"}], "src_ratio": ["FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01, "display": "number"}], "sample_ratio": ["FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01, "display": "number"}], "crop_factor": ["FLOAT", {"default": 2, "min": 1.5, "max": 3, "step": 0.1, "display": "number"}]}, 
"optional": {"src_image": ["IMAGE"], "motion_link": ["EDITOR_LINK"], "sample_image": ["IMAGE"], "add_exp": ["EXP_DATA"]}}, "input_order": {"required": ["rotate_pitch", "rotate_yaw", "rotate_roll", "blink", "eyebrow", "wink", "pupil_x", "pupil_y", "aaa", "eee", "woo", "smile", "src_ratio", "sample_ratio", "crop_factor"], "optional": ["src_image", "motion_link", "sample_image", "add_exp"]}, "is_input_list": false, "output": ["IMAGE", "EDITOR_LINK", "EXP_DATA"], "output_is_list": [false, false, false], "output_name": ["image", "motion_link", "save_exp"], "name": "ExpressionEditor", "display_name": "Expression Editor (PHM)", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "LoadExpData": {"input": {"required": {"file_name": [[]], "ratio": ["FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01}]}}, "input_order": {"required": ["file_name", "ratio"]}, "is_input_list": false, "output": ["EXP_DATA"], "output_is_list": [false], "output_name": ["exp"], "name": "LoadExpData", "display_name": "Load Exp Data (PHM)", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveExpData": {"input": {"required": {"file_name": ["STRING", {"multiline": false, "default": ""}]}, "optional": {"save_exp": ["EXP_DATA"]}}, "input_order": {"required": ["file_name"], "optional": ["save_exp"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["file_name"], "name": "SaveExpData", "display_name": "Save Exp Data (PHM)", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ExpData": {"input": {"required": {"code1": ["INT", 
{"default": 0}], "value1": ["FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}], "code2": ["INT", {"default": 0}], "value2": ["FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}], "code3": ["INT", {"default": 0}], "value3": ["FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}], "code4": ["INT", {"default": 0}], "value4": ["FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}], "code5": ["INT", {"default": 0}], "value5": ["FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}]}, "optional": {"add_exp": ["EXP_DATA"]}}, "input_order": {"required": ["code1", "value1", "code2", "value2", "code3", "value3", "code4", "value4", "code5", "value5"], "optional": ["add_exp"]}, "is_input_list": false, "output": ["EXP_DATA"], "output_is_list": [false], "output_name": ["exp"], "name": "ExpData", "display_name": "ExpData", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PrintExpData:": {"input": {"required": {"cut_noise": ["FLOAT", {"default": 0, "min": 0, "max": 100, "step": 0.1}]}, "optional": {"exp": ["EXP_DATA"]}}, "input_order": {"required": ["cut_noise"], "optional": ["exp"]}, "is_input_list": false, "output": ["EXP_DATA"], "output_is_list": [false], "output_name": ["exp"], "name": "PrintExpData:", "display_name": "PrintExpData:", "description": "", "python_module": "custom_nodes.comfyui-advancedliveportrait", "category": "AdvancedLivePortrait", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SAMLoader": {"input": {"required": {"model_name": [["mobile_sam.pt", "sam2_hiera_base_plus.pt", "sam2_hiera_large.pt", "sam2_hiera_small.pt", "sam2_hiera_tiny.pt", "sam_vit_b_01ec64.pth", "sam_vit_h_4b8939.pth", "sam_vit_l_0b3195.pth"], {"tooltip": "The detection accuracy varies depending on the SAM model. 
ESAM can only be used if ComfyUI-YoloWorld-EfficientSAM is installed."}], "device_mode": [["AUTO", "Prefer GPU", "CPU"], {"tooltip": "AUTO: Only applicable when a GPU is available. It temporarily loads the SAM_MODEL into VRAM only when the detection function is used.\nPrefer GPU: Tries to keep the SAM_MODEL on the GPU whenever possible. This can be used when there is sufficient VRAM available.\nCPU: Always loads only on the CPU."}]}}, "input_order": {"required": ["model_name", "device_mode"]}, "is_input_list": false, "output": ["SAM_MODEL"], "output_is_list": [false], "output_name": ["SAM_MODEL"], "name": "SAMLoader", "display_name": "SAMLoader (Impact)", "description": "Load the SAM (Segment Anything) model. This can be used in places that utilize SAM detection functionality, such as SAMDetector or SimpleDetector.\nThe SAM detection functionality in Impact Pack must use the SAM_MODEL loaded through this node.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CLIPSegDetectorProvider": {"input": {"required": {"text": ["STRING", {"multiline": false, "tooltip": "Enter the targets to be detected, separated by commas"}], "blur": ["FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7, "tooltip": "Blurs the detected mask"}], "threshold": ["FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4, "tooltip": "Detects only areas that are certain above the threshold."}], "dilation_factor": ["INT", {"min": 0, "max": 10, "step": 1, "default": 4, "tooltip": "Dilates the detected mask."}]}}, "input_order": {"required": ["text", "blur", "threshold", "dilation_factor"]}, "is_input_list": false, "output": ["BBOX_DETECTOR"], "output_is_list": [false], "output_name": ["BBOX_DETECTOR"], "name": "CLIPSegDetectorProvider", "display_name": "CLIPSegDetectorProvider", "description": "Provides a detection function using CLIPSeg, which generates masks based on text prompts.\nTo 
use this node, the CLIPSeg custom node must be installed.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ONNXDetectorProvider": {"input": {"required": {"model_name": [[]]}}, "input_order": {"required": ["model_name"]}, "is_input_list": false, "output": ["BBOX_DETECTOR"], "output_is_list": [false], "output_name": ["BBOX_DETECTOR"], "name": "ONNXDetectorProvider", "display_name": "ONNXDetectorProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BitwiseAndMaskForEach": {"input": {"required": {"base_segs": ["SEGS"], "mask_segs": ["SEGS"]}}, "input_order": {"required": ["base_segs", "mask_segs"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "BitwiseAndMaskForEach", "display_name": "Pixelwise(SEGS & SEGS)", "description": "Retains only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. SEGS with no overlapping mask areas are filtered out.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SubtractMaskForEach": {"input": {"required": {"base_segs": ["SEGS"], "mask_segs": ["SEGS"]}}, "input_order": {"required": ["base_segs", "mask_segs"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "SubtractMaskForEach", "display_name": "Pixelwise(SEGS - SEGS)", "description": "Removes only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. 
SEGS with no overlapping mask areas are filtered out.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEach": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "model": ["MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}], "clip": ["CLIP"], "vae": ["VAE"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, 
"max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "segs", "model", "clip", "vae", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "feather", "noise_mask", "force_inpaint", "wildcard", "cycle"], "optional": ["detailer_hook", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "DetailerForEach", "display_name": "Detailer (SEGS)", "description": "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEachAutoRetry": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "model": ["MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}], "clip": ["CLIP"], "vae": 
["VAE"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}], "max_retries": ["INT", 
{"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "segs", "model", "clip", "vae", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "feather", "noise_mask", "force_inpaint", "wildcard", "cycle", "max_retries"], "optional": ["detailer_hook", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "DetailerForEachAutoRetry", "display_name": "Detailer (SEGS) with auto retry", "description": "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEachDebug": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "model": ["MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}], "clip": ["CLIP"], "vae": ["VAE"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": 
["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], 
"tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "segs", "model", "clip", "vae", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "feather", "noise_mask", "force_inpaint", "wildcard", "cycle"], "optional": ["detailer_hook", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, true, true, true, true], "output_name": ["image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images"], "name": "DetailerForEachDebug", "display_name": "DetailerDebug (SEGS)", "description": "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEachPipe": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", 
"dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "refiner_basic_pipe_opt": ["BASIC_PIPE"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "segs", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", 
"noise_mask", "force_inpaint", "basic_pipe", "wildcard", "refiner_ratio", "cycle"], "optional": ["detailer_hook", "refiner_basic_pipe_opt", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE", "SEGS", "BASIC_PIPE", "IMAGE"], "output_is_list": [false, false, false, true], "output_name": ["image", "segs", "basic_pipe", "cnet_images"], "name": "DetailerForEachPipe", "display_name": "Detailer (SEGS/pipe)", "description": "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEachDebugPipe": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", 
"seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "refiner_basic_pipe_opt": ["BASIC_PIPE"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "segs", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "noise_mask", "force_inpaint", "basic_pipe", "wildcard", "refiner_ratio", "cycle"], "optional": ["detailer_hook", "refiner_basic_pipe_opt", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE", "SEGS", "BASIC_PIPE", 
"IMAGE", "IMAGE", "IMAGE", "IMAGE"], "output_is_list": [false, false, false, true, true, true, true], "output_name": ["image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images"], "name": "DetailerForEachDebugPipe", "display_name": "DetailerDebug (SEGS/pipe)", "description": "It enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerForEachPipeForAnimateDiff": {"input": {"required": {"image_frames": ["IMAGE"], "segs": ["SEGS"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", 
"normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"], "refiner_basic_pipe_opt": ["BASIC_PIPE"], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image_frames", "segs", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "basic_pipe", "refiner_ratio"], "optional": ["detailer_hook", "refiner_basic_pipe_opt", "noise_mask_feather", "scheduler_func_opt"]}, "is_input_list": false, "output": ["IMAGE", "SEGS", "BASIC_PIPE", "IMAGE"], "output_is_list": [false, false, false, true], "output_name": ["image", "segs", "basic_pipe", "cnet_images"], "name": "DetailerForEachPipeForAnimateDiff", "display_name": "Detailer For Video (SEGS/pipe)", "description": "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is a specialized detailer node for enhancing video details, such as in AnimateDiff. 
It can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SAMDetectorCombined": {"input": {"required": {"sam_model": ["SAM_MODEL", {"tooltip": "Segment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input."}], "segs": ["SEGS", {"tooltip": "This is the segment information detected by the detector.\nIt refines the Mask through the SAM (Segment Anything) detector for all areas pointed to by SEGS, and combines all Masks to return as a single Mask."}], "image": ["IMAGE", {"tooltip": "It is assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}], "detection_hint": [["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"], {"tooltip": "It is recommended to use only center-1.\nWhen refining the mask of SEGS with the SAM (Segment Anything) model, center-1 uses only the rectangular area of SEGS and a single point at the exact center as hints.\nOther options were added during the experimental stage and do not work well."}], "dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": "Set the value to dilate the result mask. If the value is negative, it erodes the mask."}], "threshold": ["FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Set the sensitivity threshold for the mask detected by SAM (Segment Anything). A higher value generates a more specific mask with a narrower range. 
For example, when pointing to a person's area, it might detect clothes, which is a narrower range, instead of the entire person."}], "bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": "When performing SAM (Segment Anything) detection within the SEGS area, the rectangular area of SEGS is expanded and used as a hint."}], "mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "When detection_hint is mask-area, the mask of SEGS is used as a point hint for SAM (Segment Anything).\nIn this case, only the areas of the mask with brightness values equal to or greater than mask_hint_threshold are used as hints."}], "mask_hint_use_negative": [["False", "Small", "Outter"], {"tooltip": "When detecting with SAM (Segment Anything), negative hints are applied as follows:\nSmall: When the SEGS is smaller than 10 pixels in size\nOuter: Sampling the image area outside the SEGS region at regular intervals"}]}}, "input_order": {"required": ["sam_model", "segs", "image", "detection_hint", "dilation", "threshold", "bbox_expansion", "mask_hint_threshold", "mask_hint_use_negative"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "SAMDetectorCombined", "display_name": "SAMDetector (combined)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SAMDetectorSegmented": {"input": {"required": {"sam_model": ["SAM_MODEL", {"tooltip": "Segment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input."}], "segs": ["SEGS", {"tooltip": "This is the segment information detected by the detector.\nFor the SEGS region, the masks detected by SAM (Segment Anything) are created as a unified mask and a batch of individual masks."}], "image": ["IMAGE", {"tooltip": "It is 
assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}], "detection_hint": [["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"], {"tooltip": "It is recommended to use only center-1.\nWhen refining the mask of SEGS with the SAM (Segment Anything) model, center-1 uses only the rectangular area of SEGS and a single point at the exact center as hints.\nOther options were added during the experimental stage and do not work well."}], "dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": "Set the value to dilate the result mask. If the value is negative, it erodes the mask."}], "threshold": ["FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": "When performing SAM (Segment Anything) detection within the SEGS area, the rectangular area of SEGS is expanded and used as a hint."}], "mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "When detection_hint is mask-area, the mask of SEGS is used as a point hint for SAM (Segment Anything).\nIn this case, only the areas of the mask with brightness values equal to or greater than mask_hint_threshold are used as hints."}], "mask_hint_use_negative": [["False", "Small", "Outter"], {"tooltip": "When detecting with SAM (Segment Anything), negative hints are applied as follows:\nSmall: When the SEGS is smaller than 10 pixels in size\nOuter: Sampling the image area outside the SEGS region at regular intervals"}]}}, "input_order": {"required": ["sam_model", "segs", "image", "detection_hint", "dilation", "threshold", "bbox_expansion", "mask_hint_threshold", "mask_hint_use_negative"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": [false, false], "output_name": 
["combined_mask", "batch_masks"], "name": "SAMDetectorSegmented", "display_name": "SAMDetector (segmented)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FaceDetailer": {"input": {"required": {"image": ["IMAGE"], "model": ["MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}], "clip": ["CLIP"], "vae": ["VAE"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], 
"denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}], "bbox_crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}], "sam_detection_hint": [["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"]], "sam_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sam_threshold": ["FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}], "sam_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}], "sam_mask_hint_use_negative": [["False", "Small", "Outter"]], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "bbox_detector": ["BBOX_DETECTOR"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"sam_model_opt": ["SAM_MODEL"], "segm_detector_opt": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "model", "clip", "vae", "guide_size", 
"guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "feather", "noise_mask", "force_inpaint", "bbox_threshold", "bbox_dilation", "bbox_crop_factor", "sam_detection_hint", "sam_dilation", "sam_threshold", "sam_bbox_expansion", "sam_mask_hint_threshold", "sam_mask_hint_use_negative", "drop_size", "bbox_detector", "wildcard", "cycle"], "optional": ["sam_model_opt", "segm_detector_opt", "detailer_hook", "inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE"], "output_is_list": [false, true, true, false, false, true], "output_name": ["image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images"], "name": "FaceDetailer", "display_name": "FaceDetailer", "description": "This node enhances details by automatically detecting specific objects in the input image using detection models (bbox, segm, sam) and regenerating the image by enlarging the detected area based on the guide size.\nAlthough this node is specialized to simplify the commonly used facial detail enhancement workflow, it can also be used for various automatic inpainting purposes depending on the detection model.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Simple", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FaceDetailerPipe": {"input": {"required": {"image": ["IMAGE"], "detailer_pipe": ["DETAILER_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the detailer_pipe, the inference stage is skipped."}], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, 
"max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}], "bbox_crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}], "sam_detection_hint": [["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"]], "sam_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sam_threshold": ["FLOAT", {"default": 0.93, "min": 0.0, 
"max": 1.0, "step": 0.01}], "sam_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}], "sam_mask_hint_use_negative": [["False", "Small", "Outter"]], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tiled_encode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tiled_decode": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["image", "detailer_pipe", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "noise_mask", "force_inpaint", "bbox_threshold", "bbox_dilation", "bbox_crop_factor", "sam_detection_hint", "sam_dilation", "sam_threshold", "sam_bbox_expansion", "sam_mask_hint_threshold", "sam_mask_hint_use_negative", "drop_size", "refiner_ratio", "cycle"], "optional": ["inpaint_model", "noise_mask_feather", "scheduler_func_opt", "tiled_encode", "tiled_decode"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE"], "output_is_list": [false, true, true, false, false, true], "output_name": ["image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images"], "name": "FaceDetailerPipe", "display_name": "FaceDetailer (pipe)", "description": "This node enhances details by automatically detecting specific objects in the input image using detection models (bbox, segm, sam) and regenerating the image by enlarging the detected area based on the 
guide size.\nAlthough this node is specialized to simplify the commonly used facial detail enhancement workflow, it can also be used for various automatic inpainting purposes depending on the detection model.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Simple", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskDetailerPipe": {"input": {"required": {"image": ["IMAGE"], "mask": ["MASK"], "basic_pipe": ["BASIC_PIPE"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "mask bbox", "label_off": "crop region"}], "max_size": ["FLOAT", {"default": 1024, "min": 64, "max": 16384, "step": 8}], "mask_mode": ["BOOLEAN", {"default": true, "label_on": "masked only", "label_off": "whole"}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS 
Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 100}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"refiner_basic_pipe_opt": ["BASIC_PIPE"], "detailer_hook": ["DETAILER_HOOK"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "bbox_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "contour_fill": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image", "mask", "basic_pipe", "guide_size", "guide_size_for", "max_size", "mask_mode", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "crop_factor", "drop_size", "refiner_ratio", "batch_size", "cycle"], "optional": ["refiner_basic_pipe_opt", "detailer_hook", "inpaint_model", "noise_mask_feather", "bbox_fill", "contour_fill", "scheduler_func_opt"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE"], "output_is_list": [false, true, true, false, false], "output_name": ["image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt"], "name": "MaskDetailerPipe", "display_name": "MaskDetailer (pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ToDetailerPipe": {"input": {"required": {"model": ["MODEL"], 
"clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "bbox_detector": ["BBOX_DETECTOR"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", 
"flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", 
"lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", 
"sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"sam_model_opt": ["SAM_MODEL"], "segm_detector_opt": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["model", "clip", "vae", "positive", "negative", "bbox_detector", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["sam_model_opt", "segm_detector_opt", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "ToDetailerPipe", "display_name": "ToDetailerPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ToDetailerPipeSDXL": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "refiner_model": ["MODEL"], "refiner_clip": ["CLIP"], "refiner_positive": ["CONDITIONING"], "refiner_negative": ["CONDITIONING"], "bbox_detector": ["BBOX_DETECTOR"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", 
"Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", 
"flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", 
"sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"sam_model_opt": ["SAM_MODEL"], "segm_detector_opt": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["model", "clip", "vae", "positive", "negative", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative", "bbox_detector", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["sam_model_opt", "segm_detector_opt", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "ToDetailerPipeSDXL", "display_name": "ToDetailerPipeSDXL", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FromDetailerPipe": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"]}}, "input_order": {"required": ["detailer_pipe"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook"], "name": "FromDetailerPipe", "display_name": "FromDetailerPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FromDetailerPipe_v2": {"input": {"required": 
{"detailer_pipe": ["DETAILER_PIPE"]}}, "input_order": {"required": ["detailer_pipe"]}, "is_input_list": false, "output": ["DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK"], "output_is_list": [false, false, false, false, false, false, false, false, false, false], "output_name": ["detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook"], "name": "FromDetailerPipe_v2", "display_name": "FromDetailerPipe_v2", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FromDetailerPipeSDXL": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"]}}, "input_order": {"required": ["detailer_pipe"]}, "is_input_list": false, "output": ["DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK", "MODEL", "CLIP", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative"], "name": "FromDetailerPipeSDXL", "display_name": "FromDetailer (SDXL/pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AnyPipeToBasic": {"input": {"required": {"any_pipe": ["*"]}}, "input_order": {"required": ["any_pipe"]}, "is_input_list": false, "output": ["BASIC_PIPE"], "output_is_list": [false], "output_name": ["basic_pipe"], "name": "AnyPipeToBasic", "display_name": "Any PIPE -> BasicPipe", 
"description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ToBasicPipe": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"]}}, "input_order": {"required": ["model", "clip", "vae", "positive", "negative"]}, "is_input_list": false, "output": ["BASIC_PIPE"], "output_is_list": [false], "output_name": ["basic_pipe"], "name": "ToBasicPipe", "display_name": "ToBasicPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FromBasicPipe": {"input": {"required": {"basic_pipe": ["BASIC_PIPE"]}}, "input_order": {"required": ["basic_pipe"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false, false], "output_name": ["model", "clip", "vae", "positive", "negative"], "name": "FromBasicPipe", "display_name": "FromBasicPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FromBasicPipe_v2": {"input": {"required": {"basic_pipe": ["BASIC_PIPE"]}}, "input_order": {"required": ["basic_pipe"]}, "is_input_list": false, "output": ["BASIC_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING"], "output_is_list": [false, false, false, false, false, false], "output_name": ["basic_pipe", "model", "clip", "vae", "positive", "negative"], "name": "FromBasicPipe_v2", "display_name": "FromBasicPipe_v2", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BasicPipeToDetailerPipe": {"input": {"required": 
{"basic_pipe": ["BASIC_PIPE"], "bbox_detector": ["BBOX_DETECTOR"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", 
"flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", 
"qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", 
"sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"sam_model_opt": ["SAM_MODEL"], "segm_detector_opt": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["basic_pipe", "bbox_detector", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["sam_model_opt", "segm_detector_opt", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "BasicPipeToDetailerPipe", "display_name": "BasicPipe -> DetailerPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BasicPipeToDetailerPipeSDXL": {"input": {"required": {"base_basic_pipe": ["BASIC_PIPE"], "refiner_basic_pipe": ["BASIC_PIPE"], "bbox_detector": ["BBOX_DETECTOR"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", 
"Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", 
"flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", 
"sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", 
"wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"sam_model_opt": ["SAM_MODEL"], "segm_detector_opt": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["base_basic_pipe", "refiner_basic_pipe", "bbox_detector", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["sam_model_opt", "segm_detector_opt", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "BasicPipeToDetailerPipeSDXL", "display_name": "BasicPipe -> DetailerPipe (SDXL)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerPipeToBasicPipe": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"]}}, "input_order": {"required": ["detailer_pipe"]}, "is_input_list": false, "output": ["BASIC_PIPE", "BASIC_PIPE"], "output_is_list": [false, false], "output_name": ["base_basic_pipe", "refiner_basic_pipe"], "name": "DetailerPipeToBasicPipe", "display_name": "DetailerPipe -> BasicPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EditBasicPipe": {"input": {"required": {"basic_pipe": ["BASIC_PIPE"]}, "optional": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"]}}, "input_order": {"required": ["basic_pipe"], "optional": ["model", "clip", "vae", "positive", "negative"]}, "is_input_list": false, "output": ["BASIC_PIPE"], "output_is_list": [false], "output_name": ["basic_pipe"], "name": "EditBasicPipe", "display_name": "Edit BasicPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", 
"category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EditDetailerPipe": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", 
"flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", 
"kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", 
"sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "bbox_detector": ["BBOX_DETECTOR"], "sam_model": ["SAM_MODEL"], "segm_detector": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["detailer_pipe", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model", "segm_detector", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "EditDetailerPipe", "display_name": "Edit DetailerPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EditDetailerPipeSDXL": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"], "wildcard": ["STRING", {"multiline": true, "dynamicPrompts": false}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", 
"Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", 
"flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", 
"sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}, "optional": {"model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "refiner_model": ["MODEL"], "refiner_clip": ["CLIP"], "refiner_positive": ["CONDITIONING"], "refiner_negative": ["CONDITIONING"], "bbox_detector": ["BBOX_DETECTOR"], "sam_model": ["SAM_MODEL"], "segm_detector": ["SEGM_DETECTOR"], "detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["detailer_pipe", "wildcard", "Select to add LoRA", "Select to add Wildcard"], "optional": ["model", "clip", "vae", "positive", "negative", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative", "bbox_detector", "sam_model", "segm_detector", "detailer_hook"]}, "is_input_list": false, "output": ["DETAILER_PIPE"], "output_is_list": [false], "output_name": ["detailer_pipe"], "name": "EditDetailerPipeSDXL", "display_name": "Edit DetailerPipe (SDXL)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Pipe", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LatentPixelScale": {"input": {"required": {"samples": ["LATENT"], "scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "scale_factor": ["FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.05}], "vae": ["VAE"], "use_tiled_vae": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"]}}, "input_order": {"required": ["samples", "scale_method", "scale_factor", "vae", "use_tiled_vae"], "optional": 
["upscale_model_opt"]}, "is_input_list": false, "output": ["LATENT", "IMAGE"], "output_is_list": [false, false], "output_name": ["LATENT", "IMAGE"], "name": "LatentPixelScale", "display_name": "Latent Scale (on Pixel Space)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PixelKSampleUpscalerProvider": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "model": ["MODEL"], "vae": ["VAE"], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "use_tiled_vae": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "tile_size": ["INT", 
{"default": 512, "min": 320, "max": 4096, "step": 64}]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_opt": ["PK_HOOK"], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["scale_method", "model", "vae", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "use_tiled_vae", "tile_size"], "optional": ["upscale_model_opt", "pk_hook_opt", "scheduler_func_opt"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "PixelKSampleUpscalerProvider", "display_name": "PixelKSampleUpscalerProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PixelKSampleUpscalerProviderPipe": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", 
"AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "use_tiled_vae": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "basic_pipe": ["BASIC_PIPE"], "tile_size": ["INT", {"default": 512, "min": 320, "max": 4096, "step": 64}]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_opt": ["PK_HOOK"], "scheduler_func_opt": ["SCHEDULER_FUNC"], "tile_cnet_opt": ["CONTROL_NET"], "tile_cnet_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["scale_method", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "use_tiled_vae", "basic_pipe", "tile_size"], "optional": ["upscale_model_opt", "pk_hook_opt", "scheduler_func_opt", "tile_cnet_opt", "tile_cnet_strength"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "PixelKSampleUpscalerProviderPipe", "display_name": "PixelKSampleUpscalerProviderPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "IterativeLatentUpscale": {"input": {"required": {"samples": ["LATENT"], "upscale_factor": ["FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}], "steps": ["INT", {"default": 3, "min": 1, "max": 10000, "step": 1}], "temp_prefix": ["STRING", {"default": ""}], "upscaler": ["UPSCALER"], "step_mode": [["simple", "geometric"], {"default": "simple"}]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["samples", "upscale_factor", "steps", "temp_prefix", "upscaler", "step_mode"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["LATENT", "VAE"], "output_is_list": [false, false], "output_name": ["latent", "vae"], "name": "IterativeLatentUpscale", "display_name": 
"Iterative Upscale (Latent/on Pixel Space)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "IterativeImageUpscale": {"input": {"required": {"pixels": ["IMAGE"], "upscale_factor": ["FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}], "steps": ["INT", {"default": 3, "min": 1, "max": 10000, "step": 1}], "temp_prefix": ["STRING", {"default": ""}], "upscaler": ["UPSCALER"], "vae": ["VAE"], "step_mode": [["simple", "geometric"], {"default": "simple"}]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["pixels", "upscale_factor", "steps", "temp_prefix", "upscaler", "vae", "step_mode"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "IterativeImageUpscale", "display_name": "Iterative Upscale (Image)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PixelTiledKSampleUpscalerProvider": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "model": ["MODEL"], "vae": ["VAE"], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", 
"res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "tile_width": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64}], "tile_height": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64}], "tiling_strategy": [["random", "padded", "simple"]]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_opt": ["PK_HOOK"], "tile_cnet_opt": ["CONTROL_NET"], "tile_cnet_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "overlap": ["INT", {"default": 64, "min": 0, "max": 4096, "step": 32}]}}, "input_order": {"required": ["scale_method", "model", "vae", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "tile_width", "tile_height", "tiling_strategy"], "optional": ["upscale_model_opt", "pk_hook_opt", "tile_cnet_opt", "tile_cnet_strength", "overlap"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "PixelTiledKSampleUpscalerProvider", "display_name": "PixelTiledKSampleUpscalerProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PixelTiledKSampleUpscalerProviderPipe": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, 
"max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "tile_width": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64}], "tile_height": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64}], "tiling_strategy": [["random", "padded", "simple"]], "basic_pipe": ["BASIC_PIPE"]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_opt": ["PK_HOOK"], "tile_cnet_opt": ["CONTROL_NET"], "tile_cnet_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["scale_method", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "tile_width", "tile_height", "tiling_strategy", "basic_pipe"], "optional": ["upscale_model_opt", "pk_hook_opt", "tile_cnet_opt", "tile_cnet_strength"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "PixelTiledKSampleUpscalerProviderPipe", "display_name": "PixelTiledKSampleUpscalerProviderPipe", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TwoSamplersForMaskUpscalerProvider": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "full_sample_schedule": [["none", "interleave1", "interleave2", "interleave3", "last1", "last2", "interleave1+last1", "interleave2+last1", "interleave3+last1"]], "use_tiled_vae": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "base_sampler": ["KSAMPLER"], "mask_sampler": ["KSAMPLER"], "mask": ["MASK"], "vae": ["VAE"], "tile_size": ["INT", {"default": 512, "min": 320, "max": 4096, "step": 64}]}, "optional": {"full_sampler_opt": ["KSAMPLER"], "upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_base_opt": ["PK_HOOK"], "pk_hook_mask_opt": ["PK_HOOK"], "pk_hook_full_opt": ["PK_HOOK"]}}, "input_order": {"required": ["scale_method", "full_sample_schedule", "use_tiled_vae", "base_sampler", "mask_sampler", "mask", "vae", "tile_size"], "optional": ["full_sampler_opt", "upscale_model_opt", "pk_hook_base_opt", "pk_hook_mask_opt", "pk_hook_full_opt"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "TwoSamplersForMaskUpscalerProvider", "display_name": "TwoSamplersForMask Upscaler Provider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TwoSamplersForMaskUpscalerProviderPipe": {"input": {"required": {"scale_method": [["nearest-exact", "bilinear", "lanczos", "area"]], "full_sample_schedule": [["none", "interleave1", "interleave2", "interleave3", "last1", "last2", "interleave1+last1", "interleave2+last1", "interleave3+last1"]], "use_tiled_vae": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "base_sampler": ["KSAMPLER"], "mask_sampler": ["KSAMPLER"], "mask": ["MASK"], "basic_pipe": ["BASIC_PIPE"], "tile_size": 
["INT", {"default": 512, "min": 320, "max": 4096, "step": 64}]}, "optional": {"full_sampler_opt": ["KSAMPLER"], "upscale_model_opt": ["UPSCALE_MODEL"], "pk_hook_base_opt": ["PK_HOOK"], "pk_hook_mask_opt": ["PK_HOOK"], "pk_hook_full_opt": ["PK_HOOK"]}}, "input_order": {"required": ["scale_method", "full_sample_schedule", "use_tiled_vae", "base_sampler", "mask_sampler", "mask", "basic_pipe", "tile_size"], "optional": ["full_sampler_opt", "upscale_model_opt", "pk_hook_base_opt", "pk_hook_mask_opt", "pk_hook_full_opt"]}, "is_input_list": false, "output": ["UPSCALER"], "output_is_list": [false], "output_name": ["UPSCALER"], "name": "TwoSamplersForMaskUpscalerProviderPipe", "display_name": "TwoSamplersForMask Upscaler Provider (pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PixelKSampleHookCombine": {"input": {"required": {"hook1": ["PK_HOOK"], "hook2": ["PK_HOOK"]}}, "input_order": {"required": ["hook1", "hook2"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "PixelKSampleHookCombine", "display_name": "PixelKSampleHookCombine", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DenoiseScheduleHookProvider": {"input": {"required": {"schedule_for_iteration": [["simple"]], "target_denoise": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["schedule_for_iteration", "target_denoise"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "DenoiseScheduleHookProvider", "display_name": "DenoiseScheduleHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StepsScheduleHookProvider": {"input": {"required": {"schedule_for_iteration": [["simple"]], "target_steps": ["INT", {"default": 20, "min": 1, "max": 10000}]}}, "input_order": {"required": ["schedule_for_iteration", "target_steps"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "StepsScheduleHookProvider", "display_name": "StepsScheduleHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CfgScheduleHookProvider": {"input": {"required": {"schedule_for_iteration": [["simple"]], "target_cfg": ["FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}]}}, "input_order": {"required": ["schedule_for_iteration", "target_cfg"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "CfgScheduleHookProvider", "display_name": "CfgScheduleHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "NoiseInjectionHookProvider": {"input": {"required": {"schedule_for_iteration": [["simple"]], "source": [["CPU", "GPU"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "start_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}], "end_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}]}}, "input_order": {"required": ["schedule_for_iteration", "source", "seed", "start_strength", "end_strength"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "NoiseInjectionHookProvider", "display_name": "NoiseInjectionHookProvider", "description": "", "python_module": 
"custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "UnsamplerHookProvider": {"input": {"required": {"model": ["MODEL"], "steps": ["INT", {"default": 25, "min": 1, "max": 10000}], "start_end_at_step": ["INT", {"default": 21, "min": 0, "max": 10000}], "end_end_at_step": ["INT", {"default": 24, "min": 0, "max": 10000}], "cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "normalize": [["disable", "enable"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "schedule_for_iteration": [["simple"]]}}, "input_order": {"required": ["model", "steps", "start_end_at_step", "end_end_at_step", "cfg", "sampler_name", "scheduler", "normalize", "positive", "negative", "schedule_for_iteration"]}, "is_input_list": false, "output": ["PK_HOOK"], "output_is_list": [false], "output_name": ["PK_HOOK"], "name": "UnsamplerHookProvider", "display_name": "UnsamplerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "CoreMLDetailerHookProvider": {"input": {"required": {"mode": [["512x512", "768x768", "512x768", "768x512"]]}}, "input_order": {"required": ["mode"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "CoreMLDetailerHookProvider", "display_name": "CoreMLDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PreviewDetailerHookProvider": {"input": {"required": {"quality": ["INT", {"default": 95, "min": 20, "max": 100}]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["quality"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["DETAILER_HOOK", "UPSCALER_HOOK"], "output_is_list": [false, false], "output_name": ["DETAILER_HOOK", "UPSCALER_HOOK"], "name": "PreviewDetailerHookProvider", "display_name": "PreviewDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BlackPatchRetryHookProvider": {"input": {"required": {"mean_thresh": ["INT", {"default": 10, "min": 0, "max": 255}], "var_thresh": ["INT", {"default": 5, "min": 0, "max": 255}]}}, "input_order": {"required": ["mean_thresh", "var_thresh"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "BlackPatchRetryHookProvider", "display_name": "BlackPatchRetryHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CustomSamplerDetailerHookProvider": {"input": {"required": {"sampler": ["SAMPLER"]}}, "input_order": {"required": ["sampler"]}, 
"is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "CustomSamplerDetailerHookProvider", "display_name": "CustomSamplerDetailerHookProvider", "description": "Apply a hook that allows you to use a custom sampler in the Detailer nodes. When using `DetailerHookCombine`, the sampler from the first hook is applied.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LamaRemoverDetailerHookProvider": {"input": {"required": {"mask_threshold": ["INT", {"default": 250, "min": 0, "max": 255, "step": 1, "display": "slider"}], "gaussblur_radius": ["INT", {"default": 8, "min": 0, "max": 20, "step": 1, "display": "slider"}], "skip_sampling": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["mask_threshold", "gaussblur_radius", "skip_sampling"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "LamaRemoverDetailerHookProvider", "display_name": "LamaRemoverDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DetailerHookCombine": {"input": {"required": {"hook1": ["DETAILER_HOOK"], "hook2": ["DETAILER_HOOK"]}}, "input_order": {"required": ["hook1", "hook2"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "DetailerHookCombine", "display_name": "DetailerHookCombine", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "NoiseInjectionDetailerHookProvider": {"input": {"required": {"schedule_for_cycle": [["skip_start", "from_start"]], "source": [["CPU", 
"GPU"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "start_strength": ["FLOAT", {"default": 2.0, "min": 0.0, "max": 200.0, "step": 0.01}], "end_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}]}}, "input_order": {"required": ["schedule_for_cycle", "source", "seed", "start_strength", "end_strength"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "NoiseInjectionDetailerHookProvider", "display_name": "NoiseInjectionDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "UnsamplerDetailerHookProvider": {"input": {"required": {"model": ["MODEL"], "steps": ["INT", {"default": 25, "min": 1, "max": 10000}], "start_end_at_step": ["INT", {"default": 21, "min": 0, "max": 10000}], "end_end_at_step": ["INT", {"default": 24, "min": 0, "max": 10000}], "cfg": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "normalize": [["disable", 
"enable"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "schedule_for_cycle": [["skip_start", "from_start"]]}}, "input_order": {"required": ["model", "steps", "start_end_at_step", "end_end_at_step", "cfg", "sampler_name", "scheduler", "normalize", "positive", "negative", "schedule_for_cycle"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "UnsamplerDetailerHookProvider", "display_name": "UnsamplerDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DenoiseSchedulerDetailerHookProvider": {"input": {"required": {"schedule_for_cycle": [["simple"]], "target_denoise": ["FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["schedule_for_cycle", "target_denoise"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "DenoiseSchedulerDetailerHookProvider", "display_name": "DenoiseSchedulerDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSOrderedFilterDetailerHookProvider": {"input": {"required": {"target": [["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"]], "order": ["BOOLEAN", {"default": true, "label_on": "descending", "label_off": "ascending"}], "take_start": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "take_count": ["INT", {"default": 1, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["target", "order", "take_start", "take_count"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": 
"SEGSOrderedFilterDetailerHookProvider", "display_name": "SEGSOrderedFilterDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSRangeFilterDetailerHookProvider": {"input": {"required": {"target": [["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent"]], "mode": ["BOOLEAN", {"default": true, "label_on": "inside", "label_off": "outside"}], "min_value": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "max_value": ["INT", {"default": 67108864, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["target", "mode", "min_value", "max_value"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "SEGSRangeFilterDetailerHookProvider", "display_name": "SEGSRangeFilterDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSLabelFilterDetailerHookProvider": {"input": {"required": {"segs": ["SEGS"], "preset": [["all", "hand", "face", "mouth", "eyes", "eyebrows", "pupils", "left_eyebrow", "left_eye", "left_pupil", "right_eyebrow", "right_eye", "right_pupil", "short_sleeved_shirt", "long_sleeved_shirt", "short_sleeved_outwear", "long_sleeved_outwear", "vest", "sling", "shorts", "trousers", "skirt", "short_sleeved_dress", "long_sleeved_dress", "vest_dress", "sling_dress", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball 
bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]], "labels": ["STRING", {"multiline": true, "placeholder": "List the types of segments to be allowed, separated by commas"}]}}, "input_order": {"required": ["segs", "preset", "labels"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "SEGSLabelFilterDetailerHookProvider", "display_name": "SEGSLabelFilterDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VariationNoiseDetailerHookProvider": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "strength": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["seed", "strength"]}, "is_input_list": false, "output": ["DETAILER_HOOK"], "output_is_list": [false], "output_name": ["DETAILER_HOOK"], "name": "VariationNoiseDetailerHookProvider", "display_name": "VariationNoiseDetailerHookProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BitwiseAndMask": {"input": {"required": {"mask1": ["MASK"], "mask2": ["MASK"]}}, "input_order": {"required": ["mask1", "mask2"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "BitwiseAndMask", 
"display_name": "Pixelwise(MASK & MASK)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SubtractMask": {"input": {"required": {"mask1": ["MASK"], "mask2": ["MASK"]}}, "input_order": {"required": ["mask1", "mask2"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "SubtractMask", "display_name": "Pixelwise(MASK - MASK)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AddMask": {"input": {"required": {"mask1": ["MASK"], "mask2": ["MASK"]}}, "input_order": {"required": ["mask1", "mask2"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "AddMask", "display_name": "Pixelwise(MASK + MASK)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskRectArea": {"input": {"required": {}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": [], "hidden": ["extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "MaskRectArea", "display_name": "Mask Rect Area", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskRectAreaAdvanced": {"input": {"required": {}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": [], "hidden": ["extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": 
["MASK"], "name": "MaskRectAreaAdvanced", "display_name": "Mask Rect Area (Advanced)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSegsAndMask": {"input": {"required": {"segs": ["SEGS"], "mask": ["MASK"]}}, "input_order": {"required": ["segs", "mask"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSegsAndMask", "display_name": "Pixelwise(SEGS & MASK)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSegsAndMaskForEach": {"input": {"required": {"segs": ["SEGS"], "masks": ["MASK"]}}, "input_order": {"required": ["segs", "masks"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSegsAndMaskForEach", "display_name": "Pixelwise(SEGS & MASKS ForEach)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EmptySegs": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "EmptySegs", "display_name": "EmptySegs", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactFlattenMask": {"input": {"required": {"masks": ["MASK"]}}, "input_order": {"required": ["masks"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ImpactFlattenMask", "display_name": "Flatten Mask Batch", "description": "", "python_module": 
"custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MediaPipeFaceMeshToSEGS": {"input": {"required": {"image": ["IMAGE"], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "bbox_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "crop_min_size": ["INT", {"min": 10, "max": 16384, "step": 1, "default": 50}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 1}], "dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "face": ["BOOLEAN", {"default": true, "label_on": "Enabled", "label_off": "Disabled"}], "mouth": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "left_eyebrow": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "left_eye": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "left_pupil": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "right_eyebrow": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "right_eye": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}], "right_pupil": ["BOOLEAN", {"default": false, "label_on": "Enabled", "label_off": "Disabled"}]}}, "input_order": {"required": ["image", "crop_factor", "bbox_fill", "crop_min_size", "drop_size", "dilation", "face", "mouth", "left_eyebrow", "left_eye", "left_pupil", "right_eyebrow", "right_eye", "right_pupil"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "MediaPipeFaceMeshToSEGS", "display_name": "MediaPipe FaceMesh to SEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskToSEGS": {"input": {"required": {"mask": 
["MASK"], "combined": ["BOOLEAN", {"default": false, "label_on": "True", "label_off": "False"}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "bbox_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "contour_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["mask", "combined", "crop_factor", "bbox_fill", "drop_size", "contour_fill"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "MaskToSEGS", "display_name": "MASK to SEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskToSEGS_for_AnimateDiff": {"input": {"required": {"mask": ["MASK"], "combined": ["BOOLEAN", {"default": false, "label_on": "True", "label_off": "False"}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "bbox_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "contour_fill": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}]}}, "input_order": {"required": ["mask", "combined", "crop_factor", "bbox_fill", "drop_size", "contour_fill"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "MaskToSEGS_for_AnimateDiff", "display_name": "MASK to SEGS for Video", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ToBinaryMask": {"input": {"required": {"mask": ["MASK"], "threshold": ["INT", {"default": 20, "min": 1, "max": 255}]}}, 
"input_order": {"required": ["mask", "threshold"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ToBinaryMask", "display_name": "ToBinaryMask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MasksToMaskList": {"input": {"optional": {"masks": ["MASK"]}}, "input_order": {"optional": ["masks"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [true], "output_name": ["MASK"], "name": "MasksToMaskList", "display_name": "Mask Batch to Mask List", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskListToMaskBatch": {"input": {"required": {"mask": ["MASK"]}}, "input_order": {"required": ["mask"]}, "is_input_list": true, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "MaskListToMaskBatch", "display_name": "Mask List to Mask Batch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageListToImageBatch": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": true, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageListToImageBatch", "display_name": "Image List to Image Batch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SetDefaultImageForSEGS": {"input": {"required": {"segs": ["SEGS"], "image": ["IMAGE"], "override": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["segs", "image", "override"]}, "is_input_list": 
false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "SetDefaultImageForSEGS", "display_name": "Set Default Image for SEGS", "description": "If the SEGS have not passed through the detailer, they contain only detection area information without an image. This node sets a default image for the SEGS.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RemoveImageFromSEGS": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "RemoveImageFromSEGS", "display_name": "Remove Image from SEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BboxDetectorSEGS": {"input": {"required": {"bbox_detector": ["BBOX_DETECTOR"], "image": ["IMAGE"], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "labels": ["STRING", {"multiline": true, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["bbox_detector", "image", "threshold", "dilation", "crop_factor", "drop_size", "labels"], "optional": ["detailer_hook"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "BboxDetectorSEGS", "display_name": "BBOX Detector (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": 
false, "has_intermediate_output": false, "search_aliases": []}, "SegmDetectorSEGS": {"input": {"required": {"segm_detector": ["SEGM_DETECTOR"], "image": ["IMAGE"], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "labels": ["STRING", {"multiline": true, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["segm_detector", "image", "threshold", "dilation", "crop_factor", "drop_size", "labels"], "optional": ["detailer_hook"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "SegmDetectorSEGS", "display_name": "SEGM Detector (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ONNXDetectorSEGS": {"input": {"required": {"bbox_detector": ["BBOX_DETECTOR"], "image": ["IMAGE"], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "labels": ["STRING", {"multiline": true, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}]}, "optional": {"detailer_hook": ["DETAILER_HOOK"]}}, "input_order": {"required": ["bbox_detector", "image", "threshold", "dilation", "crop_factor", "drop_size", "labels"], "optional": ["detailer_hook"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], 
"output_name": ["SEGS"], "name": "ONNXDetectorSEGS", "display_name": "ONNX Detector (SEGS/legacy) - use BBOXDetector", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSimpleDetectorSEGS_for_AD": {"input": {"required": {"bbox_detector": ["BBOX_DETECTOR"], "image_frames": ["IMAGE"], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 0, "min": -255, "max": 255, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "sub_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "sub_dilation": ["INT", {"default": 0, "min": -255, "max": 255, "step": 1}], "sub_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"masking_mode": [["Pivot SEGS", "Combine neighboring frames", "Don't combine"]], "segs_pivot": [["Combined mask", "1st frame mask"]], "sam_model_opt": ["SAM_MODEL", {"tooltip": "[OPTIONAL]\nSegment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input.\nGiven this input, it refines the rectangular areas detected by BBOX_DETECTOR into silhouette shapes through SAM.\nsam_model_opt takes priority over segm_detector_opt."}], "segm_detector_opt": ["SEGM_DETECTOR"]}}, "input_order": {"required": ["bbox_detector", "image_frames", "bbox_threshold", "bbox_dilation", "crop_factor", "drop_size", "sub_threshold", "sub_dilation", "sub_bbox_expansion", "sam_mask_hint_threshold"], "optional": ["masking_mode", "segs_pivot", "sam_model_opt", "segm_detector_opt"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], 
"output_name": ["SEGS"], "name": "ImpactSimpleDetectorSEGS_for_AD", "display_name": "Simple Detector for Video (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSAM2VideoDetectorSEGS": {"input": {"required": {"image_frames": ["IMAGE"], "bbox_detector": ["BBOX_DETECTOR"], "sam2_model": ["SAM_MODEL"], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "sam2_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}]}}, "input_order": {"required": ["image_frames", "bbox_detector", "sam2_model", "bbox_threshold", "sam2_threshold", "crop_factor", "drop_size"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSAM2VideoDetectorSEGS", "display_name": "SAM2 Video Detector (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSimpleDetectorSEGS": {"input": {"required": {"bbox_detector": ["BBOX_DETECTOR"], "image": ["IMAGE"], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "sub_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "sub_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sub_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", 
{"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"post_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sam_model_opt": ["SAM_MODEL", {"tooltip": "[OPTIONAL]\nSegment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input.\nGiven this input, it refines the rectangular areas detected by BBOX_DETECTOR into silhouette shapes through SAM.\nsam_model_opt takes priority over segm_detector_opt."}], "segm_detector_opt": ["SEGM_DETECTOR"]}}, "input_order": {"required": ["bbox_detector", "image", "bbox_threshold", "bbox_dilation", "crop_factor", "drop_size", "sub_threshold", "sub_dilation", "sub_bbox_expansion", "sam_mask_hint_threshold"], "optional": ["post_dilation", "sam_model_opt", "segm_detector_opt"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSimpleDetectorSEGS", "display_name": "Simple Detector (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSimpleDetectorSEGSPipe": {"input": {"required": {"detailer_pipe": ["DETAILER_PIPE"], "image": ["IMAGE"], "bbox_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "bbox_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}], "drop_size": ["INT", {"min": 1, "max": 16384, "step": 1, "default": 10}], "sub_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "sub_dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "sub_bbox_expansion": ["INT", {"default": 0, "min": 0, "max": 1000, "step": 1}], "sam_mask_hint_threshold": ["FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"post_dilation": ["INT", {"default": 
0, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["detailer_pipe", "image", "bbox_threshold", "bbox_dilation", "crop_factor", "drop_size", "sub_threshold", "sub_dilation", "sub_bbox_expansion", "sam_mask_hint_threshold"], "optional": ["post_dilation"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSimpleDetectorSEGSPipe", "display_name": "Simple Detector (SEGS/pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactControlNetApplySEGS": {"input": {"required": {"segs": ["SEGS"], "control_net": ["CONTROL_NET"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}, "optional": {"segs_preprocessor": ["SEGS_PREPROCESSOR"], "control_image": ["IMAGE"]}}, "input_order": {"required": ["segs", "control_net", "strength"], "optional": ["segs_preprocessor", "control_image"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactControlNetApplySEGS", "display_name": "ControlNetApply (SEGS) - DEPRECATED", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "ImpactControlNetApplyAdvancedSEGS": {"input": {"required": {"segs": ["SEGS"], "control_net": ["CONTROL_NET"], "strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}, "optional": {"segs_preprocessor": ["SEGS_PREPROCESSOR"], "control_image": ["IMAGE"], "vae": ["VAE"]}}, "input_order": {"required": ["segs", "control_net", "strength", "start_percent", "end_percent"], "optional": 
["segs_preprocessor", "control_image", "vae"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactControlNetApplyAdvancedSEGS", "display_name": "ControlNetApply (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactControlNetClearSEGS": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactControlNetClearSEGS", "display_name": "ImpactControlNetClearSEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactIPAdapterApplySEGS": {"input": {"required": {"segs": ["SEGS"], "ipadapter_pipe": ["IPADAPTER_PIPE"], "weight": ["FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}], "noise": ["FLOAT", {"default": 0.4, "min": 0.0, "max": 1.0, "step": 0.01}], "weight_type": [["original", "linear", "channel penalty"], {"default": "channel penalty"}], "start_at": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}], "end_at": ["FLOAT", {"default": 0.9, "min": 0.0, "max": 1.0, "step": 0.001}], "unfold_batch": ["BOOLEAN", {"default": false}], "faceid_v2": ["BOOLEAN", {"default": false}], "weight_v2": ["FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}], "context_crop_factor": ["FLOAT", {"default": 1.2, "min": 1.0, "max": 100, "step": 0.1}], "reference_image": ["IMAGE"]}, "optional": {"combine_embeds": [["concat", "add", "subtract", "average", "norm average"]], "neg_image": ["IMAGE"]}}, "input_order": {"required": ["segs", "ipadapter_pipe", "weight", "noise", "weight_type", "start_at", "end_at", "unfold_batch", "faceid_v2", "weight_v2", "context_crop_factor", "reference_image"], 
"optional": ["combine_embeds", "neg_image"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactIPAdapterApplySEGS", "display_name": "IPAdapterApply (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactDecomposeSEGS": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["SEGS_HEADER", "SEG_ELT"], "output_is_list": [false, true], "output_name": ["SEGS_HEADER", "SEG_ELT"], "name": "ImpactDecomposeSEGS", "display_name": "Decompose (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactAssembleSEGS": {"input": {"required": {"seg_header": ["SEGS_HEADER"], "seg_elt": ["SEG_ELT"]}}, "input_order": {"required": ["seg_header", "seg_elt"]}, "is_input_list": true, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactAssembleSEGS", "display_name": "Assemble (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactFrom_SEG_ELT": {"input": {"required": {"seg_elt": ["SEG_ELT"]}}, "input_order": {"required": ["seg_elt"]}, "is_input_list": false, "output": ["SEG_ELT", "IMAGE", "MASK", "SEG_ELT_crop_region", "SEG_ELT_bbox", "SEG_ELT_control_net_wrapper", "FLOAT", "STRING"], "output_is_list": [false, false, false, false, false, false, false, false], "output_name": ["seg_elt", "cropped_image", "cropped_mask", "crop_region", "bbox", "control_net_wrapper", "confidence", "label"], "name": "ImpactFrom_SEG_ELT", "display_name": "From SEG_ELT", "description": "", "python_module": 
"custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactEdit_SEG_ELT": {"input": {"required": {"seg_elt": ["SEG_ELT"]}, "optional": {"cropped_image_opt": ["IMAGE"], "cropped_mask_opt": ["MASK"], "crop_region_opt": ["SEG_ELT_crop_region"], "bbox_opt": ["SEG_ELT_bbox"], "control_net_wrapper_opt": ["SEG_ELT_control_net_wrapper"], "confidence_opt": ["FLOAT", {"min": 0, "max": 1.0, "step": 0.1, "forceInput": true}], "label_opt": ["STRING", {"multiline": false, "forceInput": true}]}}, "input_order": {"required": ["seg_elt"], "optional": ["cropped_image_opt", "cropped_mask_opt", "crop_region_opt", "bbox_opt", "control_net_wrapper_opt", "confidence_opt", "label_opt"]}, "is_input_list": false, "output": ["SEG_ELT"], "output_is_list": [false], "output_name": ["SEG_ELT"], "name": "ImpactEdit_SEG_ELT", "display_name": "Edit SEG_ELT", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactDilate_Mask_SEG_ELT": {"input": {"required": {"seg_elt": ["SEG_ELT"], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["seg_elt", "dilation"]}, "is_input_list": false, "output": ["SEG_ELT"], "output_is_list": [false], "output_name": ["SEG_ELT"], "name": "ImpactDilate_Mask_SEG_ELT", "display_name": "Dilate Mask (SEG_ELT)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactDilateMask": {"input": {"required": {"mask": ["MASK"], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["mask", "dilation"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ImpactDilateMask", 
"display_name": "Dilate Mask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactGaussianBlurMask": {"input": {"required": {"mask": ["MASK"], "kernel_size": ["INT", {"default": 10, "min": 0, "max": 100, "step": 1}], "sigma": ["FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}]}}, "input_order": {"required": ["mask", "kernel_size", "sigma"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ImpactGaussianBlurMask", "display_name": "Gaussian Blur Mask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactDilateMaskInSEGS": {"input": {"required": {"segs": ["SEGS"], "dilation": ["INT", {"default": 10, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["segs", "dilation"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactDilateMaskInSEGS", "display_name": "Dilate Mask (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactGaussianBlurMaskInSEGS": {"input": {"required": {"segs": ["SEGS"], "kernel_size": ["INT", {"default": 10, "min": 0, "max": 100, "step": 1}], "sigma": ["FLOAT", {"default": 10.0, "min": 0.1, "max": 100.0, "step": 0.1}]}}, "input_order": {"required": ["segs", "kernel_size", "sigma"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactGaussianBlurMaskInSEGS", "display_name": "Gaussian Blur Mask (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "ImpactScaleBy_BBOX_SEG_ELT": {"input": {"required": {"seg": ["SEG_ELT"], "scale_by": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}]}}, "input_order": {"required": ["seg", "scale_by"]}, "is_input_list": false, "output": ["SEG_ELT"], "output_is_list": [false], "output_name": ["SEG_ELT"], "name": "ImpactScaleBy_BBOX_SEG_ELT", "display_name": "ScaleBy BBOX (SEG_ELT)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactFrom_SEG_ELT_bbox": {"input": {"required": {"bbox": ["SEG_ELT_bbox"]}}, "input_order": {"required": ["bbox"]}, "is_input_list": false, "output": ["INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["left", "top", "right", "bottom"], "name": "ImpactFrom_SEG_ELT_bbox", "display_name": "From SEG_ELT bbox", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactFrom_SEG_ELT_crop_region": {"input": {"required": {"crop_region": ["SEG_ELT_crop_region"]}}, "input_order": {"required": ["crop_region"]}, "is_input_list": false, "output": ["INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["left", "top", "right", "bottom"], "name": "ImpactFrom_SEG_ELT_crop_region", "display_name": "From SEG_ELT crop_region", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactCount_Elts_in_SEGS": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "ImpactCount_Elts_in_SEGS", "display_name": 
"Count Elts in SEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BboxDetectorCombined_v2": {"input": {"required": {"bbox_detector": ["BBOX_DETECTOR"], "image": ["IMAGE"], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "dilation": ["INT", {"default": 4, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["bbox_detector", "image", "threshold", "dilation"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "BboxDetectorCombined_v2", "display_name": "BBOX Detector (combined)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SegmDetectorCombined_v2": {"input": {"required": {"segm_detector": ["SEGM_DETECTOR"], "image": ["IMAGE"], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "dilation": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}]}}, "input_order": {"required": ["segm_detector", "image", "threshold", "dilation"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "SegmDetectorCombined_v2", "display_name": "SEGM Detector (combined)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detector", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SegsToCombinedMask": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "SegsToCombinedMask", "display_name": "SEGS to MASK (combined)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Operation", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "KSamplerProvider": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "sampler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"], {"tooltip": "noise schedule"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}]}, "optional": {"scheduler_func_opt": ["SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. 
If this is set, the scheduler widget will be ignored."}]}}, "input_order": {"required": ["seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "basic_pipe"], "optional": ["scheduler_func_opt"]}, "is_input_list": false, "output": ["KSAMPLER"], "output_is_list": [false], "output_name": ["KSAMPLER"], "name": "KSamplerProvider", "display_name": "KSamplerProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Sampler", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["sampler wrapper. (Can be used when generating a regional_prompt.)"], "search_aliases": []}, "TwoSamplersForMask": {"input": {"required": {"latent_image": ["LATENT", {"tooltip": "input latent image"}], "base_sampler": ["KSAMPLER", {"tooltip": "Sampler to apply to the region outside the mask."}], "mask_sampler": ["KSAMPLER", {"tooltip": "Sampler to apply to the masked region."}], "mask": ["MASK", {"tooltip": "region mask"}]}}, "input_order": {"required": ["latent_image", "base_sampler", "mask_sampler", "mask"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "TwoSamplersForMask", "display_name": "TwoSamplersForMask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Sampler", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["result latent"], "search_aliases": []}, "TiledKSamplerProvider": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", 
"exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "sampler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"tooltip": "noise schedule"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}], "tile_width": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64, "tooltip": "Sets the width of the tile to be used in TiledKSampler."}], "tile_height": ["INT", {"default": 512, "min": 320, "max": 16384, "step": 64, "tooltip": "Sets the height of the tile to be used in TiledKSampler."}], "tiling_strategy": [["random", "padded", "simple"], {"tooltip": "Sets the tiling strategy for TiledKSampler."}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}]}}, "input_order": {"required": ["seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "tile_width", "tile_height", "tiling_strategy", "basic_pipe"]}, "is_input_list": false, "output": ["KSAMPLER"], "output_is_list": [false], "output_name": ["KSAMPLER"], "name": "TiledKSamplerProvider", "display_name": "TiledKSamplerProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Sampler", 
"output_node": false, "has_intermediate_output": false, "output_tooltips": ["sampler wrapper. (Can be used when generating a regional_prompt.)"], "search_aliases": []}, "KSamplerAdvancedProvider": {"input": {"required": {"cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "sampler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"], {"tooltip": "noise schedule"}], "sigma_factor": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "tooltip": "Multiplier of noise schedule"}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}]}, "optional": {"sampler_opt": ["SAMPLER", {"tooltip": "[OPTIONAL] Uses the passed sampler instead of internal impact_sampler."}], "scheduler_func_opt": ["SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. 
If this is set, the scheduler widget will be ignored."}]}}, "input_order": {"required": ["cfg", "sampler_name", "scheduler", "sigma_factor", "basic_pipe"], "optional": ["sampler_opt", "scheduler_func_opt"]}, "is_input_list": false, "output": ["KSAMPLER_ADVANCED"], "output_is_list": [false], "output_name": ["KSAMPLER_ADVANCED"], "name": "KSamplerAdvancedProvider", "display_name": "KSamplerAdvancedProvider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Sampler", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["sampler wrapper. (Can be used when generating a regional_prompt.)"], "search_aliases": []}, "TwoAdvancedSamplersForMask": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. 
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}], "samples": ["LATENT", {"tooltip": "input latent image"}], "base_sampler": ["KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the region outside the mask."}], "mask_sampler": ["KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the masked region."}], "mask": ["MASK", {"tooltip": "region mask"}], "overlap_factor": ["INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask by the overlap_factor amount to overlap with other regions."}]}}, "input_order": {"required": ["seed", "steps", "denoise", "samples", "base_sampler", "mask_sampler", "mask", "overlap_factor"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "TwoAdvancedSamplersForMask", "display_name": "TwoAdvancedSamplersForMask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Sampler", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["result latent"], "search_aliases": []}, "ImpactNegativeConditioningPlaceholder": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ImpactNegativeConditioningPlaceholder", "display_name": "Negative Cond Placeholder", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["This is a Placeholder for the FLUX model that does not use Negative Conditioning."], "search_aliases": []}, "PreviewBridge": {"input": {"required": {"images": ["IMAGE"], "image": ["STRING", {"default": ""}]}, "optional": {"block": ["BOOLEAN", {"default": false, "label_on": "if_empty_mask", "label_off": "never", "tooltip": 
"is_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped."}], "restore_mask": [["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input image is the same size as the previous image, restore using the last saved mask\nalways: Whenever the input image changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`"}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "image"], "optional": ["block", "restore_mask"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "PreviewBridge", "display_name": "Preview Bridge (Image)", "description": "This is a feature that allows you to edit and send a Mask over a image.\nIf the block is set to 'is_empty_mask', the execution is stopped when the mask is empty.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "PreviewBridgeLatent": {"input": {"required": {"latent": ["LATENT"], "image": ["STRING", {"default": ""}], "preview_method": [["Latent2RGB-FLUX.1", "Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3", "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5", "Latent2RGB-SC-Prior", "Latent2RGB-SC-B", "Latent2RGB-LTXV", "TAEF1", "TAESDXL", "TAESD15", "TAESD3"]]}, "optional": {"vae_opt": ["VAE"], "block": ["BOOLEAN", {"default": false, "label_on": "if_empty_mask", "label_off": "never", "tooltip": "is_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped. 
Instead, it returns a white mask."}], "restore_mask": [["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input latent is the same size as the previous latent, restore using the last saved mask\nalways: Whenever the input latent changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`\nIf the input latent already has a mask, do not restore mask."}]}, "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["latent", "image", "preview_method"], "optional": ["vae_opt", "block", "restore_mask"], "hidden": ["unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["LATENT", "MASK"], "output_is_list": [false, false], "output_name": ["LATENT", "MASK"], "name": "PreviewBridgeLatent", "display_name": "Preview Bridge (Latent)", "description": "This is a feature that allows you to edit and send a Mask over a latent image.\nIf the block is set to 'is_empty_mask', the execution is stopped when the mask is empty.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImageSender": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ImgSender"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "link_id"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImageSender", "display_name": "Image Sender", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, 
"search_aliases": ["preview", "preview image", "show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "ImageReceiver": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"]], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "save_to_workflow": ["BOOLEAN", {"default": false}], "image_data": ["STRING", {"multiline": false}], "trigger_always": ["BOOLEAN", {"default": false, "label_on": "enable", "label_off": "disable"}]}}, "input_order": {"required": ["image", "link_id", "save_to_workflow", "image_data", "trigger_always"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImageReceiver", "display_name": "Image Receiver", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LatentSender": {"input": {"required": {"samples": ["LATENT"], "filename_prefix": ["STRING", {"default": "latents/LatentSender"}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "preview_method": [["Latent2RGB-FLUX.1", "Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3", "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5", "Latent2RGB-SC-Prior", "Latent2RGB-SC-B", "Latent2RGB-LTXV", "TAEF1", "TAESDXL", "TAESD15", "TAESD3"]]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["samples", "filename_prefix", "link_id", "preview_method"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": 
"LatentSender", "display_name": "LatentSender", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": ["export latent"]}, "LatentReceiver": {"input": {"required": {"latent": [[]], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "trigger_always": ["BOOLEAN", {"default": false, "label_on": "enable", "label_off": "disable"}]}}, "input_order": {"required": ["latent", "link_id", "trigger_always"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "LatentReceiver", "display_name": "LatentReceiver", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageMaskSwitch": {"input": {"required": {"select": ["INT", {"default": 1, "min": 1, "max": 4, "step": 1}], "images1": ["IMAGE"]}, "optional": {"mask1_opt": ["MASK"], "images2_opt": ["IMAGE"], "mask2_opt": ["MASK"], "images3_opt": ["IMAGE"], "mask3_opt": ["MASK"], "images4_opt": ["IMAGE"], "mask4_opt": ["MASK"]}}, "input_order": {"required": ["select", "images1"], "optional": ["mask1_opt", "images2_opt", "mask2_opt", "images3_opt", "mask3_opt", "images4_opt", "mask4_opt"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImageMaskSwitch", "display_name": "Switch (images, mask)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "LatentSwitch": {"input": {"required": {"select": ["INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The input number you want to output among the inputs"}], "sel_mode": ["BOOLEAN", {"default": false, "label_on": 
"select_on_prompt", "label_off": "select_on_execution", "forceInput": false, "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}]}, "optional": {"input1": ["*", {"lazy": true, "tooltip": "Any input. When connected, one more input slot is added."}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["select", "sel_mode"], "optional": ["input1"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["*", "STRING", "INT"], "output_is_list": [false, false, false], "output_name": ["selected_value", "selected_label", "selected_index"], "name": "LatentSwitch", "display_name": "Switch (latent/legacy)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Output is generated only from the input chosen by the 'select' value.", "Slot label of the selected input slot", "Outputs the select value as is"], "search_aliases": []}, "SEGSSwitch": {"input": {"required": {"select": ["INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The input number you want to output among the inputs"}], "sel_mode": ["BOOLEAN", {"default": false, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": false, "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}]}, "optional": {"input1": ["*", {"lazy": true, "tooltip": "Any input. 
When connected, one more input slot is added."}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["select", "sel_mode"], "optional": ["input1"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["*", "STRING", "INT"], "output_is_list": [false, false, false], "output_name": ["selected_value", "selected_label", "selected_index"], "name": "SEGSSwitch", "display_name": "Switch (SEGS/legacy)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Output is generated only from the input chosen by the 'select' value.", "Slot label of the selected input slot", "Outputs the select value as is"], "search_aliases": []}, "ImpactSwitch": {"input": {"required": {"select": ["INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The input number you want to output among the inputs"}], "sel_mode": ["BOOLEAN", {"default": false, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": false, "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}]}, "optional": {"input1": ["*", {"lazy": true, "tooltip": "Any input. 
When connected, one more input slot is added."}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["select", "sel_mode"], "optional": ["input1"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": ["*", "STRING", "INT"], "output_is_list": [false, false, false], "output_name": ["selected_value", "selected_label", "selected_index"], "name": "ImpactSwitch", "display_name": "Switch (Any)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Output is generated only from the input chosen by the 'select' value.", "Slot label of the selected input slot", "Outputs the select value as is"], "search_aliases": []}, "ImpactInversedSwitch": {"input": {"required": {"select": ["INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The output number you want to send from the input"}], "input": ["*", {"tooltip": "Any input. When connected, one more input slot is added."}]}, "optional": {"sel_mode": ["BOOLEAN", {"default": false, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": false, "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 
'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}]}, "hidden": {"prompt": "PROMPT", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["select", "input"], "optional": ["sel_mode"], "hidden": ["prompt", "unique_id"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactInversedSwitch", "display_name": "Inversed Switch (Any)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Output occurs only from the output selected by the 'select' value.\nWhen slots are connected, additional slots are created."], "search_aliases": []}, "ImpactWildcardProcessor": {"input": {"required": {"wildcard_text": ["STRING", {"multiline": true, "dynamicPrompts": false, "tooltip": "Enter a prompt using wildcard syntax."}], "populated_text": ["STRING", {"multiline": true, "dynamicPrompts": false, "tooltip": "The actual value passed during the execution of 'ImpactWildcardProcessor' is what is shown here. The behavior varies slightly depending on the mode. Wildcard syntax can also be used in 'populated_text'."}], "mode": [["populate", "fixed", "reproduce"], {"default": "populate", "tooltip": "populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'. In this mode, 'populated_text' cannot be edited.\nfixed: Ignores wildcard_text and keeps 'populated_text' as is. 
You can edit 'populated_text' in this mode.\nreproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode."}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Determines the random seed to be used for wildcard processing."}], "Select to add Wildcard": [["Select the Wildcard to add to the text"]]}}, "input_order": {"required": ["wildcard_text", "populated_text", "mode", "seed", "Select to add Wildcard"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["processed text"], "name": "ImpactWildcardProcessor", "display_name": "ImpactWildcardProcessor", "description": "The 'ImpactWildcardProcessor' processes text prompts written in wildcard syntax and outputs the processed text prompt.\n\nTIP: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow. If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Prompt", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactWildcardEncode": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "wildcard_text": ["STRING", {"multiline": true, "dynamicPrompts": false, "tooltip": "Enter a prompt using wildcard syntax."}], "populated_text": ["STRING", {"multiline": true, "dynamicPrompts": false, "tooltip": "The actual value passed during the execution of 'ImpactWildcardEncode' is what is shown here. The behavior varies slightly depending on the mode. 
Wildcard syntax can also be used in 'populated_text'."}], "mode": [["populate", "fixed", "reproduce"], {"tooltip": "populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'. In this mode, 'populated_text' cannot be edited.\nfixed: Ignores wildcard_text and keeps 'populated_text' as is. You can edit 'populated_text' in this mode.\nreproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode."}], "Select to add LoRA": [["Select the LoRA to add to the text", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", 
"flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", 
"kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", 
"sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "Select to add Wildcard": [["Select the Wildcard to add to the text"]], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Determines the random seed to be used for wildcard processing."}]}}, "input_order": {"required": ["model", "clip", "wildcard_text", "populated_text", "mode", "Select to add LoRA", "Select to add Wildcard", "seed"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "CONDITIONING", "STRING"], "output_is_list": [false, false, false, false], "output_name": ["model", "clip", "conditioning", "populated_text"], "name": "ImpactWildcardEncode", "display_name": "ImpactWildcardEncode", "description": "The 'ImpactWildcardEncode' node processes text prompts written in wildcard syntax and outputs them as conditioning. 
It also supports LoRA syntax, with the applied LoRA reflected in the model's output.\n\nTIP1: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow. If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.\nTIP2: If the 'Inspire Pack' is installed, LBW(LoRA Block Weight) syntax can also be applied.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Prompt", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSUpscaler": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "rescale_factor": ["FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}], "resampling_method": [["lanczos", "nearest", "bilinear", "bicubic"]], "supersample": [["true", "false"]], "rounding_modulus": ["INT", {"default": 8, "min": 8, "max": 1024, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], 
"scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "upscaler_hook_opt": ["UPSCALER_HOOK"], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image", "segs", "model", "clip", "vae", "rescale_factor", "resampling_method", "supersample", "rounding_modulus", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "denoise", "feather", "inpaint_model", "noise_mask_feather"], "optional": ["upscale_model_opt", "upscaler_hook_opt", "scheduler_func_opt"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SEGSUpscaler", "display_name": "Upscaler (SEGS)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSUpscalerPipe": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "basic_pipe": ["BASIC_PIPE"], "rescale_factor": ["FLOAT", {"default": 2, "min": 0.01, "max": 100.0, "step": 0.01}], "resampling_method": [["lanczos", "nearest", "bilinear", "bicubic"]], "supersample": [["true", "false"]], "rounding_modulus": ["INT", {"default": 8, "min": 8, "max": 1024, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": 
["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}]}, "optional": {"upscale_model_opt": ["UPSCALE_MODEL"], "upscaler_hook_opt": ["UPSCALER_HOOK"], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image", "segs", "basic_pipe", "rescale_factor", "resampling_method", "supersample", "rounding_modulus", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "feather", "inpaint_model", "noise_mask_feather"], "optional": ["upscale_model_opt", "upscaler_hook_opt", "scheduler_func_opt"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SEGSUpscalerPipe", "display_name": "Upscaler (SEGS/pipe)", 
"description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Upscale", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSDetailer": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 768, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "noise_mask": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "force_inpaint": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled"}], "basic_pipe": ["BASIC_PIPE", {"tooltip": 
"If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 100}], "cycle": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}, "optional": {"refiner_basic_pipe_opt": ["BASIC_PIPE"], "inpaint_model": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image", "segs", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "noise_mask", "force_inpaint", "basic_pipe", "refiner_ratio", "batch_size", "cycle"], "optional": ["refiner_basic_pipe_opt", "inpaint_model", "noise_mask_feather", "scheduler_func_opt"]}, "is_input_list": false, "output": ["SEGS", "IMAGE"], "output_is_list": [false, true], "output_name": ["segs", "cnet_images"], "name": "SEGSDetailer", "display_name": "SEGSDetailer", "description": "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is applied specifically to SEGS rather than the entire image. 
To apply it to the entire image, use the 'SEGS Paste' node.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSPaste": {"input": {"required": {"image": ["IMAGE"], "segs": ["SEGS"], "feather": ["INT", {"default": 5, "min": 0, "max": 100, "step": 1}], "alpha": ["INT", {"default": 255, "min": 0, "max": 255, "step": 1}]}, "optional": {"ref_image_opt": ["IMAGE"]}}, "input_order": {"required": ["image", "segs", "feather", "alpha"], "optional": ["ref_image_opt"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SEGSPaste", "display_name": "SEGSPaste", "description": "This node provides a function to paste the enhanced SEGS, improved through the SEGS detailer, back onto the original image.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSPreview": {"input": {"required": {"segs": ["SEGS"], "alpha_mode": ["BOOLEAN", {"default": true, "label_on": "enable", "label_off": "disable"}], "min_alpha": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"fallback_image_opt": ["IMAGE"]}}, "input_order": {"required": ["segs", "alpha_mode", "min_alpha"], "optional": ["fallback_image_opt"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "SEGSPreview", "display_name": "SEGSPreview", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SEGSPreviewCNet": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "SEGSPreviewCNet", "display_name": 
"SEGSPreview (CNET Image)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SEGSToImageList": {"input": {"required": {"segs": ["SEGS"]}, "optional": {"fallback_image_opt": ["IMAGE"]}}, "input_order": {"required": ["segs"], "optional": ["fallback_image_opt"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "SEGSToImageList", "display_name": "SEGSToImageList", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSToMaskList": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [true], "output_name": ["MASK"], "name": "ImpactSEGSToMaskList", "display_name": "SEGS to Mask List", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSToMaskBatch": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ImpactSEGSToMaskBatch", "display_name": "SEGS to Mask Batch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSConcat": {"input": {"required": {"segs1": ["SEGS"]}}, "input_order": {"required": ["segs1"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSEGSConcat", "display_name": "SEGS Concat", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": 
"ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSPicker": {"input": {"required": {"picks": ["STRING", {"multiline": true, "dynamicPrompts": false, "pysssss.autocomplete": false}], "segs": ["SEGS"]}, "optional": {"fallback_image_opt": ["IMAGE"]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["picks", "segs"], "optional": ["fallback_image_opt"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSEGSPicker", "display_name": "Picker (SEGS)", "description": "This node provides a function to select only the chosen SEGS from the input SEGS.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeTileSEGS": {"input": {"required": {"images": ["IMAGE"], "bbox_size": ["INT", {"default": 512, "min": 64, "max": 4096, "step": 8}], "crop_factor": ["FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.01}], "min_overlap": ["INT", {"default": 5, "min": 0, "max": 512, "step": 1}], "filter_segs_dilation": ["INT", {"default": 20, "min": -255, "max": 255, "step": 1}], "mask_irregularity": ["FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}], "irregular_mask_mode": [["Reuse fast", "Reuse quality", "All random fast", "All random quality"]]}, "optional": {"filter_in_segs_opt": ["SEGS"], "filter_out_segs_opt": ["SEGS"]}}, "input_order": {"required": ["images", "bbox_size", "crop_factor", "min_overlap", "filter_segs_dilation", "mask_irregularity", "irregular_mask_mode"], "optional": ["filter_in_segs_opt", "filter_out_segs_opt"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactMakeTileSEGS", "display_name": "Make Tile SEGS", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": 
"ImpactPack/__for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSMerge": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSEGSMerge", "display_name": "SEGS Merge", "description": "SEGS contains multiple SEGs. SEGS Merge integrates several SEGs into a single merged SEG. The label is changed to `merged` and the confidence becomes the minimum confidence. The applied controlnet and cropped_image are removed.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SEGSDetailerForAnimateDiff": {"input": {"required": {"image_frames": ["IMAGE"], "segs": ["SEGS"], "guide_size": ["FLOAT", {"default": 512, "min": 64, "max": 16384, "step": 8}], "guide_size_for": ["BOOLEAN", {"default": true, "label_on": "bbox", "label_off": "crop_region"}], "max_size": ["FLOAT", {"default": 768, "min": 64, "max": 16384, "step": 8}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", 
"sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "denoise": ["FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}], "basic_pipe": ["BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}], "refiner_ratio": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}]}, "optional": {"refiner_basic_pipe_opt": ["BASIC_PIPE"], "noise_mask_feather": ["INT", {"default": 20, "min": 0, "max": 100, "step": 1}], "scheduler_func_opt": ["SCHEDULER_FUNC"]}}, "input_order": {"required": ["image_frames", "segs", "guide_size", "guide_size_for", "max_size", "seed", "steps", "cfg", "sampler_name", "scheduler", "denoise", "basic_pipe", "refiner_ratio"], "optional": ["refiner_basic_pipe_opt", "noise_mask_feather", "scheduler_func_opt"]}, "is_input_list": false, "output": ["SEGS", "IMAGE"], "output_is_list": [false, true], "output_name": ["segs", "cnet_images"], "name": "SEGSDetailerForAnimateDiff", "display_name": "SEGSDetailer For Video (SEGS/pipe)", "description": "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is applied specifically to SEGS rather than the entire image. 
To apply it to the entire image, use the 'SEGS Paste' node.\nAs a specialized detailer node for improving video details, such as in AnimateDiff, this node can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Detailer", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactKSamplerBasicPipe": {"input": {"required": {"basic_pipe": ["BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}], "seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "sampler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"], {"tooltip": "noise schedule"}], "latent_image": ["LATENT", {"tooltip": "input latent image"}], "denoise": 
["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}]}, "optional": {"scheduler_func_opt": ["SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}]}}, "input_order": {"required": ["basic_pipe", "seed", "steps", "cfg", "sampler_name", "scheduler", "latent_image", "denoise"], "optional": ["scheduler_func_opt"]}, "is_input_list": false, "output": ["BASIC_PIPE", "LATENT", "VAE"], "output_is_list": [false, false, false], "output_name": ["BASIC_PIPE", "LATENT", "VAE"], "name": "ImpactKSamplerBasicPipe", "display_name": "KSampler (pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["passthrough input basic_pipe", "result latent", "VAE in basic_pipe"], "search_aliases": []}, "ImpactKSamplerAdvancedBasicPipe": {"input": {"required": {"basic_pipe": ["BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}], "add_noise": ["BOOLEAN", {"default": true, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to add noise"}], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", 
"dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"tooltip": "sampler"}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"], {"tooltip": "noise schedule"}], "latent_image": ["LATENT", {"tooltip": "input latent image"}], "start_at_step": ["INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}], "end_at_step": ["INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}], "return_with_leftover_noise": ["BOOLEAN", {"default": false, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}]}, "optional": {"scheduler_func_opt": ["SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. 
If this is set, the scheduler widget will be ignored."}]}}, "input_order": {"required": ["basic_pipe", "add_noise", "noise_seed", "steps", "cfg", "sampler_name", "scheduler", "latent_image", "start_at_step", "end_at_step", "return_with_leftover_noise"], "optional": ["scheduler_func_opt"]}, "is_input_list": false, "output": ["BASIC_PIPE", "LATENT", "VAE"], "output_is_list": [false, false, false], "output_name": ["BASIC_PIPE", "LATENT", "VAE"], "name": "ImpactKSamplerAdvancedBasicPipe", "display_name": "KSampler (Advanced/pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["passthrough input basic_pipe", "result latent", "VAE in basic_pipe"], "search_aliases": []}, "ReencodeLatent": {"input": {"required": {"samples": ["LATENT"], "tile_mode": [["None", "Both", "Decode(input) only", "Encode(output) only"]], "input_vae": ["VAE"], "output_vae": ["VAE"], "tile_size": ["INT", {"default": 512, "min": 320, "max": 4096, "step": 64}]}, "optional": {"overlap": ["INT", {"default": 64, "min": 0, "max": 4096, "step": 32, "tooltip": "This setting applies when 'tile_mode' is enabled."}]}}, "input_order": {"required": ["samples", "tile_mode", "input_vae", "output_vae", "tile_size"], "optional": ["overlap"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "ReencodeLatent", "display_name": "Reencode Latent", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ReencodeLatentPipe": {"input": {"required": {"samples": ["LATENT"], "tile_mode": [["None", "Both", "Decode(input) only", "Encode(output) only"]], "input_basic_pipe": ["BASIC_PIPE"], "output_basic_pipe": ["BASIC_PIPE"]}}, "input_order": {"required": ["samples", "tile_mode", "input_basic_pipe", 
"output_basic_pipe"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "ReencodeLatentPipe", "display_name": "Reencode Latent (pipe)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactImageBatchToImageList": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "ImpactImageBatchToImageList", "display_name": "Image Batch to Image List", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeImageList": {"input": {"optional": {"image1": ["IMAGE"]}}, "input_order": {"optional": ["image1"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [true], "output_name": ["IMAGE"], "name": "ImpactMakeImageList", "display_name": "Make Image List", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeImageBatch": {"input": {"optional": {"image1": ["IMAGE"]}}, "input_order": {"optional": ["image1"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImpactMakeImageBatch", "display_name": "Make Image Batch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeAnyList": {"input": {"required": {}, "optional": {"value1": ["*"]}}, "input_order": {"required": [], "optional": ["value1"]}, "is_input_list": false, "output": ["*"], "output_is_list": [true], "output_name": ["*"], 
"name": "ImpactMakeAnyList", "display_name": "Make List (Any)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeMaskList": {"input": {"required": {"mask1": ["MASK"]}}, "input_order": {"required": ["mask1"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [true], "output_name": ["MASK"], "name": "ImpactMakeMaskList", "display_name": "Make Mask List", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMakeMaskBatch": {"input": {"optional": {"mask1": ["MASK"]}}, "input_order": {"optional": ["mask1"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ImpactMakeMaskBatch", "display_name": "Make Mask Batch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSelectNthItemOfAnyList": {"input": {"required": {"any_list": ["*"], "index": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "tooltip": "The index of the item you want to select from the list."}]}}, "input_order": {"required": ["any_list", "index"]}, "is_input_list": true, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactSelectNthItemOfAnyList", "display_name": "Select Nth Item (Any list)", "description": "Selects the Nth item from a list. 
If the index is out of range, it returns the last item in the list.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RegionalSampler": {"input": {"required": {"seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "seed_2nd": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Additional noise seed. The behavior is determined by seed_2nd_mode."}], "seed_2nd_mode": [["ignore", "fixed", "seed+seed_2nd", "seed-seed_2nd", "increment", "decrement", "randomize"], {"tooltip": "application method of seed_2nd. 1) ignore: Do not use seed_2nd. In the base only sampling stage, the seed is applied as a noise seed, and in the regional sampling stage, denoising is performed as it is without additional noise. 2) Others: In the base only sampling stage, the seed is applied as a noise seed, and once it is closed so that there is no leftover noise, new noise is added with seed_2nd and the regional sampling stage is performed. a) fixed: Use seed_2nd as it is as an additional noise seed. b) seed+seed_2nd: Apply the value of seed+seed_2nd as an additional noise seed. c) seed-seed_2nd: Apply the value of seed-seed_2nd as an additional noise seed. d) increment: Not implemented yet. Same with fixed. e) decrement: Not implemented yet. Same with fixed. f) randomize: Not implemented yet. Same with fixed."}], "steps": ["INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}], "base_only_steps": ["INT", {"default": 2, "min": 0, "max": 10000, "tooltip": "total sampling steps"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. 
This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}], "samples": ["LATENT", {"tooltip": "input latent image"}], "base_sampler": ["KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}], "regional_prompts": ["REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}], "overlap_factor": ["INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}], "restore_latent": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true."}], "additional_mode": [["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "..._sde or uni_pc and other special samplers are used, the region is not properly denoised, and it causes a phenomenon that destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. This requires a longer time for sampling because a second sampling is performed at each step in each region using a special sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount to be performed in the step with the sampler set in the region, the recovery sampler is additionally applied by the additional_sigma_ratio. If you use this option, the total denoise amount increases by additional_sigma_ratio. 
3) ratio between: The denoise amount to be performed in the step with the sampler set in the region and the denoise amount to be applied to the recovery sampler are divided by additional_sigma_ratio, and denoise is performed for each denoise amount. If you use this option, the total denoise amount does not change."}], "additional_sampler": [["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 2) Others: Manually set the recovery sampler."}], "additional_sigma_ratio": ["FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["seed", "seed_2nd", "seed_2nd_mode", "steps", "base_only_steps", "denoise", "samples", "base_sampler", "regional_prompts", "overlap_factor", "restore_latent", "additional_mode", "additional_sampler", "additional_sigma_ratio"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "RegionalSampler", "display_name": "RegionalSampler", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Regional", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["result latent"], "search_aliases": []}, "RegionalSamplerAdvanced": {"input": {"required": {"add_noise": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to add noise"}], "noise_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Random seed to use for generating CPU noise for sampling."}], "steps": ["INT", {"default": 20, 
"min": 1, "max": 10000, "tooltip": "total sampling steps"}], "start_at_step": ["INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}], "end_at_step": ["INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}], "overlap_factor": ["INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}], "restore_latent": ["BOOLEAN", {"default": true, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true."}], "return_with_leftover_noise": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}], "latent_image": ["LATENT", {"tooltip": "input latent image"}], "base_sampler": ["KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}], "regional_prompts": ["REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}], "additional_mode": [["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "..._sde or uni_pc and other special samplers are used, the region is not properly denoised, and it causes a phenomenon that destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. 
This requires a longer time for sampling because a second sampling is performed at each step in each region using a special sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount to be performed in the step with the sampler set in the region, the recovery sampler is additionally applied by the additional_sigma_ratio. If you use this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The denoise amount to be performed in the step with the sampler set in the region and the denoise amount to be applied to the recovery sampler are divided by additional_sigma_ratio, and denoise is performed for each denoise amount. If you use this option, the total denoise amount does not change."}], "additional_sampler": [["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, dpmpp_sde_gpu, the dpm_fast sampler is selected. If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 
2) Others: Manually set the recovery sampler."}], "additional_sigma_ratio": ["FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["add_noise", "noise_seed", "steps", "start_at_step", "end_at_step", "overlap_factor", "restore_latent", "return_with_leftover_noise", "latent_image", "base_sampler", "regional_prompts", "additional_mode", "additional_sampler", "additional_sigma_ratio"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "RegionalSamplerAdvanced", "display_name": "RegionalSamplerAdvanced", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Regional", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["result latent"], "search_aliases": []}, "CombineRegionalPrompts": {"input": {"required": {"regional_prompts1": ["REGIONAL_PROMPTS", {"tooltip": "input regional_prompts. 
(Connecting to the input slot increases the number of additional slots.)"}]}}, "input_order": {"required": ["regional_prompts1"]}, "is_input_list": false, "output": ["REGIONAL_PROMPTS"], "output_is_list": [false], "output_name": ["REGIONAL_PROMPTS"], "name": "CombineRegionalPrompts", "display_name": "CombineRegionalPrompts", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Regional", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Combined REGIONAL_PROMPTS"], "search_aliases": []}, "RegionalPrompt": {"input": {"required": {"mask": ["MASK", {"tooltip": "region mask"}], "advanced_sampler": ["KSAMPLER_ADVANCED", {"tooltip": "sampler for specified region"}]}, "optional": {"variation_seed": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615, "tooltip": "Sets the extra seed to be used for noise variation."}], "variation_strength": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Sets the strength of the noise variation."}], "variation_method": [["linear", "slerp"], {"tooltip": "Sets how the original noise and extra noise are blended together."}]}}, "input_order": {"required": ["mask", "advanced_sampler"], "optional": ["variation_seed", "variation_strength", "variation_method"]}, "is_input_list": false, "output": ["REGIONAL_PROMPTS"], "output_is_list": [false], "output_name": ["REGIONAL_PROMPTS"], "name": "RegionalPrompt", "display_name": "RegionalPrompt", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Regional", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["regional prompts. (Can be used in the RegionalSampler.)"], "search_aliases": []}, "ImpactCombineConditionings": {"input": {"required": {"conditioning1": ["CONDITIONING", {"tooltip": "input conditionings. 
(Connecting to the input slot increases the number of additional slots.)"}]}}, "input_order": {"required": ["conditioning1"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ImpactCombineConditionings", "display_name": "Combine Conditionings", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Combined conditioning"], "search_aliases": []}, "ImpactConcatConditionings": {"input": {"required": {"conditioning1": ["CONDITIONING", {"tooltip": "input conditionings. (Connecting to the input slot increases the number of additional slots.)"}]}}, "input_order": {"required": ["conditioning1"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "ImpactConcatConditionings", "display_name": "Concat Conditionings", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Concatenated conditioning"], "search_aliases": []}, "ImpactSEGSLabelAssign": {"input": {"required": {"segs": ["SEGS"], "labels": ["STRING", {"multiline": true, "placeholder": "List the label to be assigned in order of segs, separated by commas"}]}}, "input_order": {"required": ["segs", "labels"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["SEGS"], "name": "ImpactSEGSLabelAssign", "display_name": "SEGS Assign (label)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSLabelFilter": {"input": {"required": {"segs": ["SEGS"], "preset": [["all", "hand", "face", "mouth", "eyes", "eyebrows", "pupils", "left_eyebrow", "left_eye", "left_pupil", 
"right_eyebrow", "right_eye", "right_pupil", "short_sleeved_shirt", "long_sleeved_shirt", "short_sleeved_outwear", "long_sleeved_outwear", "vest", "sling", "shorts", "trousers", "skirt", "short_sleeved_dress", "long_sleeved_dress", "vest_dress", "sling_dress", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]], "labels": ["STRING", {"multiline": true, "placeholder": "List the types of segments to be allowed, separated by commas"}]}}, "input_order": {"required": ["segs", "preset", "labels"]}, "is_input_list": false, "output": ["SEGS", "SEGS"], "output_is_list": [false, false], "output_name": ["filtered_SEGS", "remained_SEGS"], "name": "ImpactSEGSLabelFilter", "display_name": "SEGS Filter (label)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSRangeFilter": {"input": {"required": {"segs": ["SEGS"], "target": [["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent", "confidence(0-100)"]], "mode": ["BOOLEAN", {"default": true, "label_on": "inside", "label_off": "outside"}], "min_value": ["INT", {"default": 0, 
"min": 0, "max": 9223372036854775807, "step": 1}], "max_value": ["INT", {"default": 67108864, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["segs", "target", "mode", "min_value", "max_value"]}, "is_input_list": false, "output": ["SEGS", "SEGS"], "output_is_list": [false, false], "output_name": ["filtered_SEGS", "remained_SEGS"], "name": "ImpactSEGSRangeFilter", "display_name": "SEGS Filter (range)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSOrderedFilter": {"input": {"required": {"segs": ["SEGS"], "target": [["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "confidence", "none"]], "order": ["BOOLEAN", {"default": true, "label_on": "descending", "label_off": "ascending"}], "take_start": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}], "take_count": ["INT", {"default": 1, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["segs", "target", "order", "take_start", "take_count"]}, "is_input_list": false, "output": ["SEGS", "SEGS"], "output_is_list": [false, false], "output_name": ["filtered_SEGS", "remained_SEGS"], "name": "ImpactSEGSOrderedFilter", "display_name": "SEGS Filter (ordered)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSIntersectionFilter": {"input": {"required": {"segs1": ["SEGS"], "segs2": ["SEGS"], "ioa_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["segs1", "segs2", "ioa_threshold"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["filtered_SEGS"], "name": "ImpactSEGSIntersectionFilter", "display_name": "SEGS Filter (intersection)", "description": "", 
"python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSNMSFilter": {"input": {"required": {"segs": ["SEGS"], "iou_threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["segs", "iou_threshold"]}, "is_input_list": false, "output": ["SEGS"], "output_is_list": [false], "output_name": ["filtered_SEGS"], "name": "ImpactSEGSNMSFilter", "display_name": "SEGS Filter (non max suppression)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactCompare": {"input": {"required": {"cmp": [["a = b", "a <> b", "a > b", "a < b", "a >= b", "a <= b", "tt", "ff"]], "a": ["*"], "b": ["*"]}}, "input_order": {"required": ["cmp", "a", "b"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ImpactCompare", "display_name": "ImpactCompare", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactConditionalBranch": {"input": {"required": {"cond": ["BOOLEAN"], "tt_value": ["*", {"lazy": true}], "ff_value": ["*", {"lazy": true}]}}, "input_order": {"required": ["cond", "tt_value", "ff_value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactConditionalBranch", "display_name": "ImpactConditionalBranch", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactConditionalBranchSelMode": {"input": {"required": {"cond": ["BOOLEAN"]}, "optional": {"tt_value": ["*"], "ff_value": ["*"]}}, "input_order": {"required": 
["cond"], "optional": ["tt_value", "ff_value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactConditionalBranchSelMode", "display_name": "ImpactConditionalBranchSelMode", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactIfNone": {"input": {"required": {}, "optional": {"signal": ["*"], "any_input": ["*"]}}, "input_order": {"required": [], "optional": ["signal", "any_input"]}, "is_input_list": false, "output": ["*", "BOOLEAN"], "output_is_list": [false, false], "output_name": ["signal_opt", "bool"], "name": "ImpactIfNone", "display_name": "ImpactIfNone", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactConvertDataType": {"input": {"required": {"value": ["*"]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["STRING", "FLOAT", "INT", "BOOLEAN"], "output_is_list": [false, false, false, false], "output_name": ["STRING", "FLOAT", "INT", "BOOLEAN"], "name": "ImpactConvertDataType", "display_name": "ImpactConvertDataType", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactLogicalOperators": {"input": {"required": {"operator": [["and", "or", "xor"]], "bool_a": ["BOOLEAN", {"forceInput": true}], "bool_b": ["BOOLEAN", {"forceInput": true}]}}, "input_order": {"required": ["operator", "bool_a", "bool_b"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ImpactLogicalOperators", "display_name": "ImpactLogicalOperators", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": 
"ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactInt": {"input": {"required": {"value": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "ImpactInt", "display_name": "ImpactInt", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactFloat": {"input": {"required": {"value": ["FLOAT", {"default": 1.0, "min": -3.402823466e+38, "max": 3.402823466e+38}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "ImpactFloat", "display_name": "ImpactFloat", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactBoolean": {"input": {"required": {"value": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ImpactBoolean", "display_name": "ImpactBoolean", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactValueSender": {"input": {"required": {"value": ["*"], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}, "optional": {"signal_opt": ["*"]}}, "input_order": {"required": ["value", "link_id"], "optional": ["signal_opt"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["signal"], "name": "ImpactValueSender", "display_name": "ImpactValueSender", "description": 
"", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactValueReceiver": {"input": {"required": {"typ": [["STRING", "INT", "FLOAT", "BOOLEAN"]], "value": ["STRING", {"default": ""}], "link_id": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["typ", "value", "link_id"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactValueReceiver", "display_name": "ImpactValueReceiver", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactImageInfo": {"input": {"required": {"value": ["IMAGE"]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["batch", "height", "width", "channel"], "name": "ImpactImageInfo", "display_name": "ImpactImageInfo", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactLatentInfo": {"input": {"required": {"value": ["LATENT"]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["batch", "height", "width", "channel"], "name": "ImpactLatentInfo", "display_name": "ImpactLatentInfo", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactMinMax": {"input": {"required": {"mode": ["BOOLEAN", {"default": true, "label_on": "max", "label_off": "min"}], "a": ["*"], "b": ["*"]}}, 
"input_order": {"required": ["mode", "a", "b"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "ImpactMinMax", "display_name": "ImpactMinMax", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactNeg": {"input": {"required": {"value": ["BOOLEAN", {"forceInput": true}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ImpactNeg", "display_name": "ImpactNeg", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactConditionalStopIteration": {"input": {"required": {"cond": ["BOOLEAN", {"forceInput": true}]}}, "input_order": {"required": ["cond"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImpactConditionalStopIteration", "display_name": "ImpactConditionalStopIteration", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactStringSelector": {"input": {"required": {"strings": ["STRING", {"multiline": true}], "multiline": ["BOOLEAN", {"default": false, "label_on": "enabled", "label_off": "disabled"}], "select": ["INT", {"min": 0, "max": 9223372036854775807, "step": 1, "default": 0}]}}, "input_order": {"required": ["strings", "multiline", "select"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ImpactStringSelector", "display_name": "String Selector", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "StringListToString": {"input": {"required": {"join_with": ["STRING", {"default": "\\n"}], "string_list": ["STRING", {"forceInput": true}]}}, "input_order": {"required": ["join_with", "string_list"]}, "is_input_list": true, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StringListToString", "display_name": "String List to String", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "WildcardPromptFromString": {"input": {"required": {"string": ["STRING", {"forceInput": true}], "delimiter": ["STRING", {"multiline": false, "default": "\\n"}], "prefix_all": ["STRING", {"multiline": false}], "postfix_all": ["STRING", {"multiline": false}], "restrict_to_tags": ["STRING", {"multiline": false}], "exclude_tags": ["STRING", {"multiline": false}]}}, "input_order": {"required": ["string", "delimiter", "prefix_all", "postfix_all", "restrict_to_tags", "exclude_tags"]}, "is_input_list": false, "output": ["STRING", "STRING"], "output_is_list": [false, false], "output_name": ["wildcard", "segs_labels"], "name": "WildcardPromptFromString", "display_name": "Wildcard Prompt from String", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactExecutionOrderController": {"input": {"required": {"signal": ["*"], "value": ["*"]}}, "input_order": {"required": ["signal", "value"]}, "is_input_list": false, "output": ["*", "*"], "output_is_list": [false, false], "output_name": ["signal", "value"], "name": "ImpactExecutionOrderController", "display_name": "Execution Order Controller", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, 
"search_aliases": []}, "ImpactListBridge": {"input": {"required": {"list_input": ["*"]}}, "input_order": {"required": ["list_input"]}, "is_input_list": true, "output": ["*"], "output_is_list": [true], "output_name": ["list_output"], "name": "ImpactListBridge", "display_name": "List Bridge", "description": "When passing the list output through this node, it collects and organizes the data before forwarding it, which ensures that the previous stage's sub-workflow has been completed.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RemoveNoiseMask": {"input": {"required": {"samples": ["LATENT"]}}, "input_order": {"required": ["samples"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "RemoveNoiseMask", "display_name": "Remove Noise Mask", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactLogger": {"input": {"required": {"data": ["*"], "text": ["STRING", {"multiline": true}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["data", "text"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImpactLogger", "display_name": "ImpactLogger", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Debug", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactDummyInput": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ImpactDummyInput", "display_name": "ImpactDummyInput", "description": "", "python_module": 
"custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Debug", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactQueueTrigger": {"input": {"required": {"signal": ["*"], "mode": ["BOOLEAN", {"default": true, "label_on": "Trigger", "label_off": "Don't trigger"}]}}, "input_order": {"required": ["signal", "mode"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["signal_opt"], "name": "ImpactQueueTrigger", "display_name": "Queue Trigger", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactQueueTriggerCountdown": {"input": {"required": {"count": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "total": ["INT", {"default": 10, "min": 1, "max": 18446744073709551615}], "mode": ["BOOLEAN", {"default": true, "label_on": "Trigger", "label_off": "Don't trigger"}]}, "optional": {"signal": ["*"]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["count", "total", "mode"], "optional": ["signal"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["*", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["signal_opt", "count", "total"], "name": "ImpactQueueTriggerCountdown", "display_name": "Queue Trigger (Countdown)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactSetWidgetValue": {"input": {"required": {"signal": ["*"], "node_id": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "widget_name": ["STRING", {"multiline": false}]}, "optional": {"boolean_value": ["BOOLEAN", {"forceInput": true}], "int_value": ["INT", {"forceInput": true}], "float_value": ["FLOAT", {"forceInput": true}], "string_value": ["STRING", {"forceInput": true}]}}, 
"input_order": {"required": ["signal", "node_id", "widget_name"], "optional": ["boolean_value", "int_value", "float_value", "string_value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["signal_opt"], "name": "ImpactSetWidgetValue", "display_name": "Set Widget Value", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactNodeSetMuteState": {"input": {"required": {"signal": ["*"], "node_id": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "set_state": ["BOOLEAN", {"default": true, "label_on": "active", "label_off": "mute"}]}}, "input_order": {"required": ["signal", "node_id", "set_state"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["signal_opt"], "name": "ImpactNodeSetMuteState", "display_name": "Set Mute State", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactControlBridge": {"input": {"required": {"value": ["*"], "mode": ["BOOLEAN", {"default": true, "label_on": "Active", "label_off": "Stop/Mute/Bypass"}], "behavior": [["Stop", "Mute", "Bypass"]]}, "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["value", "mode", "behavior"], "hidden": ["unique_id", "prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["value"], "name": "ImpactControlBridge", "display_name": "Control Bridge", "description": "When behavior is Stop and mode is active, the input value is passed directly to the output.\nWhen behavior is Mute/Bypass and mode is active, the node connected to the output is changed to active state.\nWhen behavior is Stop and mode is Stop/Mute/Bypass, the 
workflow execution of the current node is halted.\nWhen behavior is Mute/Bypass and mode is Stop/Mute/Bypass, the node connected to the output is changed to Mute/Bypass state.", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactIsNotEmptySEGS": {"input": {"required": {"segs": ["SEGS"]}}, "input_order": {"required": ["segs"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ImpactIsNotEmptySEGS", "display_name": "SEGS isn't Empty", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSleep": {"input": {"required": {"signal": ["*"], "seconds": ["FLOAT", {"default": 0.5, "min": 0, "max": 3600}]}}, "input_order": {"required": ["signal", "seconds"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["signal_opt"], "name": "ImpactSleep", "display_name": "Sleep", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactRemoteBoolean": {"input": {"required": {"node_id": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "widget_name": ["STRING", {"multiline": false}], "value": ["BOOLEAN", {"default": true, "label_on": "True", "label_off": "False"}]}}, "input_order": {"required": ["node_id", "widget_name", "value"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImpactRemoteBoolean", "display_name": "Remote Boolean (on prompt)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, 
"ImpactRemoteInt": {"input": {"required": {"node_id": ["INT", {"default": 0, "min": 0, "max": 18446744073709551615}], "widget_name": ["STRING", {"multiline": false}], "value": ["INT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}]}}, "input_order": {"required": ["node_id", "widget_name", "value"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ImpactRemoteInt", "display_name": "Remote Int (on prompt)", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Logic/_for_test", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImpactHFTransformersClassifierProvider": {"input": {"required": {"preset_repo_id": [["rizvandwiki/gender-classification-2", "NTQAI/pedestrian_gender_recognition", "Leilab/gender_class", "ProjectPersonal/GenderClassifier", "crangana/trained-gender", "cledoux42/GenderNew_v002", "ivensamdh/genderage2", "Manual repo id"]], "manual_repo_id": ["STRING", {"multiline": false}], "device_mode": [["AUTO", "Prefer GPU", "CPU"]]}}, "input_order": {"required": ["preset_repo_id", "manual_repo_id", "device_mode"]}, "is_input_list": false, "output": ["TRANSFORMERS_CLASSIFIER"], "output_is_list": [false], "output_name": ["TRANSFORMERS_CLASSIFIER"], "name": "ImpactHFTransformersClassifierProvider", "display_name": "HF Transformers Classifier Provider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/HuggingFace", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSEGSClassify": {"input": {"required": {"classifier": ["TRANSFORMERS_CLASSIFIER"], "segs": ["SEGS"], "preset_expr": [["#Female > #Male", "#Female < #Male", "female > 0.5", "male > 0.5", "Age16to25 > 0.1", "Age50to69 > 0.1", "Manual expr"]], "manual_expr": ["STRING", {"multiline": false}]}, "optional": {"ref_image_opt": ["IMAGE"]}}, "input_order": {"required": ["classifier", "segs", 
"preset_expr", "manual_expr"], "optional": ["ref_image_opt"]}, "is_input_list": false, "output": ["SEGS", "SEGS", "STRING"], "output_is_list": [false, false, true], "output_name": ["filtered_SEGS", "remained_SEGS", "detected_labels"], "name": "ImpactSEGSClassify", "display_name": "SEGS Classify", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/HuggingFace", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImpactSchedulerAdapter": {"input": {"required": {"scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"defaultInput": true}], "extra_scheduler": [["None", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]]}}, "input_order": {"required": ["scheduler", "extra_scheduler"]}, "is_input_list": false, "output": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal", "AYS SDXL", "AYS SD1", "AYS SVD", "GITS[coeff=1.2]", "LTXV[default]", "OSS FLUX", "OSS Wan", "OSS Chroma"]], "output_is_list": [false], "output_name": ["scheduler"], "name": "ImpactSchedulerAdapter", "display_name": "Impact Scheduler Adapter", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/Util", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GITSSchedulerFuncProvider": {"input": {"required": {"coeff": ["FLOAT", {"default": 1.2, "min": 0.8, "max": 1.5, "step": 0.05, "tooltip": "coeff factor of GITS Scheduler"}], "denoise": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "denoise amount for noise schedule"}]}}, "input_order": {"required": ["coeff", "denoise"]}, "is_input_list": false, "output": ["SCHEDULER_FUNC"], "output_is_list": [false], "output_name": ["SCHEDULER_FUNC"], "name": "GITSSchedulerFuncProvider", 
"display_name": "GITSScheduler Func Provider", "description": "", "python_module": "custom_nodes.comfyui-impact-pack", "category": "ImpactPack/sampling", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["Returns a function that generates a noise schedule using GITSScheduler. This can be used in place of a predetermined noise schedule to dynamically generate a noise schedule based on the steps."], "search_aliases": []}, "BOOLConstant": {"input": {"required": {"value": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["value"], "name": "BOOLConstant", "display_name": "BOOL Constant", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/constants", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "INTConstant": {"input": {"required": {"value": ["INT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["value"], "name": "INTConstant", "display_name": "INT Constant", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/constants", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FloatConstant": {"input": {"required": {"value": ["FLOAT", {"default": 0.0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 1e-05}]}}, "input_order": {"required": ["value"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["value"], "name": "FloatConstant", "display_name": "Float Constant", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/constants", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StringConstant": {"input": {"required": {"string": 
["STRING", {"default": "", "multiline": false}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StringConstant", "display_name": "String Constant", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/constants", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StringConstantMultiline": {"input": {"required": {"string": ["STRING", {"default": "", "multiline": true}], "strip_newlines": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["string", "strip_newlines"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StringConstantMultiline", "display_name": "String Constant Multiline", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/constants", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningMultiCombine": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 20, "step": 1}], "operation": [["combine", "concat"], {"default": "combine"}], "conditioning_1": ["CONDITIONING"], "conditioning_2": ["CONDITIONING"]}}, "input_order": {"required": ["inputcount", "operation", "conditioning_1", "conditioning_2"]}, "is_input_list": false, "output": ["CONDITIONING", "INT"], "output_is_list": [false, false], "output_name": ["combined", "inputcount"], "name": "ConditioningMultiCombine", "display_name": "Conditioning Multi Combine", "description": "\nCombines multiple conditioning nodes into one\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetMaskAndCombine": {"input": {"required": {"positive_1": ["CONDITIONING"], "negative_1": ["CONDITIONING"], "positive_2": ["CONDITIONING"], "negative_2": 
["CONDITIONING"], "mask_1": ["MASK"], "mask_2": ["MASK"], "mask_1_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_2_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}}, "input_order": {"required": ["positive_1", "negative_1", "positive_2", "negative_2", "mask_1", "mask_2", "mask_1_strength", "mask_2_strength", "set_cond_area"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["combined_positive", "combined_negative"], "name": "ConditioningSetMaskAndCombine", "display_name": "ConditioningSetMaskAndCombine", "description": "\nBundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetMaskAndCombine3": {"input": {"required": {"positive_1": ["CONDITIONING"], "negative_1": ["CONDITIONING"], "positive_2": ["CONDITIONING"], "negative_2": ["CONDITIONING"], "positive_3": ["CONDITIONING"], "negative_3": ["CONDITIONING"], "mask_1": ["MASK"], "mask_2": ["MASK"], "mask_3": ["MASK"], "mask_1_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_2_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_3_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}}, "input_order": {"required": ["positive_1", "negative_1", "positive_2", "negative_2", "positive_3", "negative_3", "mask_1", "mask_2", "mask_3", "mask_1_strength", "mask_2_strength", "mask_3_strength", "set_cond_area"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["combined_positive", 
"combined_negative"], "name": "ConditioningSetMaskAndCombine3", "display_name": "ConditioningSetMaskAndCombine3", "description": "\nBundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConditioningSetMaskAndCombine4": {"input": {"required": {"positive_1": ["CONDITIONING"], "negative_1": ["CONDITIONING"], "positive_2": ["CONDITIONING"], "negative_2": ["CONDITIONING"], "positive_3": ["CONDITIONING"], "negative_3": ["CONDITIONING"], "positive_4": ["CONDITIONING"], "negative_4": ["CONDITIONING"], "mask_1": ["MASK"], "mask_2": ["MASK"], "mask_3": ["MASK"], "mask_4": ["MASK"], "mask_1_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_2_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_3_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_4_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}}, "input_order": {"required": ["positive_1", "negative_1", "positive_2", "negative_2", "positive_3", "negative_3", "positive_4", "negative_4", "mask_1", "mask_2", "mask_3", "mask_4", "mask_1_strength", "mask_2_strength", "mask_3_strength", "mask_4_strength", "set_cond_area"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["combined_positive", "combined_negative"], "name": "ConditioningSetMaskAndCombine4", "display_name": "ConditioningSetMaskAndCombine4", "description": "\nBundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/conditioning", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "ConditioningSetMaskAndCombine5": {"input": {"required": {"positive_1": ["CONDITIONING"], "negative_1": ["CONDITIONING"], "positive_2": ["CONDITIONING"], "negative_2": ["CONDITIONING"], "positive_3": ["CONDITIONING"], "negative_3": ["CONDITIONING"], "positive_4": ["CONDITIONING"], "negative_4": ["CONDITIONING"], "positive_5": ["CONDITIONING"], "negative_5": ["CONDITIONING"], "mask_1": ["MASK"], "mask_2": ["MASK"], "mask_3": ["MASK"], "mask_4": ["MASK"], "mask_5": ["MASK"], "mask_1_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_2_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_3_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_4_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "mask_5_strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "set_cond_area": [["default", "mask bounds"]]}}, "input_order": {"required": ["positive_1", "negative_1", "positive_2", "negative_2", "positive_3", "negative_3", "positive_4", "negative_4", "positive_5", "negative_5", "mask_1", "mask_2", "mask_3", "mask_4", "mask_5", "mask_1_strength", "mask_2_strength", "mask_3_strength", "mask_4_strength", "mask_5_strength", "set_cond_area"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["combined_positive", "combined_negative"], "name": "ConditioningSetMaskAndCombine5", "display_name": "ConditioningSetMaskAndCombine5", "description": "\nBundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/conditioning", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CondPassThrough": {"input": {"required": {}, "optional": {"positive": 
["CONDITIONING"], "negative": ["CONDITIONING"]}}, "input_order": {"required": [], "optional": ["positive", "negative"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING"], "output_is_list": [false, false], "output_name": ["positive", "negative"], "name": "CondPassThrough", "display_name": "CondPassThrough", "description": "\n Simply passes through the positive and negative conditioning,\n workaround for Set node not allowing bypassed inputs.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DownloadAndLoadCLIPSeg": {"input": {"required": {"model": [["Kijai/clipseg-rd64-refined-fp16", "CIDAS/clipseg-rd64-refined"]]}}, "input_order": {"required": ["model"]}, "is_input_list": false, "output": ["CLIPSEGMODEL"], "output_is_list": [false], "output_name": ["clipseg_model"], "name": "DownloadAndLoadCLIPSeg", "display_name": "(Down)load CLIPSeg", "description": "\nDownloads and loads CLIPSeg model with huggingface_hub, \nto ComfyUI/models/clip_seg\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BatchCLIPSeg": {"input": {"required": {"images": ["IMAGE"], "text": ["STRING", {"multiline": false}], "threshold": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.001}], "binary_mask": ["BOOLEAN", {"default": true}], "combine_mask": ["BOOLEAN", {"default": false}], "use_cuda": ["BOOLEAN", {"default": true}]}, "optional": {"blur_sigma": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}], "opt_model": ["CLIPSEGMODEL"], "prev_mask": ["MASK", {"default": null}], "image_bg_level": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "invert": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["images", "text", "threshold", "binary_mask", "combine_mask", "use_cuda"], "optional": ["blur_sigma", 
"opt_model", "prev_mask", "image_bg_level", "invert"]}, "is_input_list": false, "output": ["MASK", "IMAGE"], "output_is_list": [false, false], "output_name": ["Mask", "Image"], "name": "BatchCLIPSeg", "display_name": "Batch CLIPSeg", "description": "\nSegments an image or batch of images using CLIPSeg.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ColorToMask": {"input": {"required": {"images": ["IMAGE"], "invert": ["BOOLEAN", {"default": false}], "red": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "green": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "blue": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "threshold": ["INT", {"default": 10, "min": 0, "max": 255, "step": 1}], "per_batch": ["INT", {"default": 16, "min": 1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["images", "invert", "red", "green", "blue", "threshold", "per_batch"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "ColorToMask", "display_name": "Color To Mask", "description": "\nConverts chosen RGB value to a mask. 
\nWith batch inputs, the **per_batch** \ncontrols the number of images processed at once.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateGradientMask": {"input": {"required": {"invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "width": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}]}}, "input_order": {"required": ["invert", "frames", "width", "height"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "CreateGradientMask", "display_name": "Create Gradient Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateTextMask": {"input": {"required": {"invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 1, "min": 1, "max": 4096, "step": 1}], "text_x": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "text_y": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "font_size": ["INT", {"default": 32, "min": 8, "max": 4096, "step": 1}], "font_color": ["STRING", {"default": "white"}], "text": ["STRING", {"default": "HELLO!", "multiline": true}], "font": [["FreeMono.ttf", "FreeMonoBoldOblique.otf", "TTNorms-Black.otf"]], "width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "start_rotation": ["INT", {"default": 0, "min": 0, "max": 359, "step": 1}], "end_rotation": ["INT", {"default": 0, "min": -359, "max": 359, "step": 1}]}}, "input_order": {"required": ["invert", "frames", "text_x", "text_y", "font_size", "font_color", "text", "font", "width", "height", "start_rotation", "end_rotation"]}, 
"is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "CreateTextMask", "display_name": "Create Text Mask", "description": "\nCreates a text image and mask. \nLooks for fonts from this folder: \nComfyUI/custom_nodes/ComfyUI-KJNodes/fonts\n \nIf start_rotation and/or end_rotation are different values, \ncreates animation between them.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateAudioMask": {"input": {"required": {"invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 16, "min": 1, "max": 255, "step": 1}], "scale": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 2.0, "step": 0.01}], "audio_path": ["STRING", {"default": "audio.wav"}], "width": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}]}}, "input_order": {"required": ["invert", "frames", "scale", "audio_path", "width", "height"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "CreateAudioMask", "display_name": "Create Audio Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/deprecated", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateFadeMask": {"input": {"required": {"invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 2, "min": 2, "max": 10000, "step": 1}], "width": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out"]], "start_level": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "midpoint_level": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "end_level": ["FLOAT", 
{"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "midpoint_frame": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}]}}, "input_order": {"required": ["invert", "frames", "width", "height", "interpolation", "start_level", "midpoint_level", "end_level", "midpoint_frame"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "CreateFadeMask", "display_name": "Create Fade Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/deprecated", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateFadeMaskAdvanced": {"input": {"required": {"points_string": ["STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": true}], "invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 16, "min": 2, "max": 10000, "step": 1}], "width": ["INT", {"default": 512, "min": 1, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 1, "max": 4096, "step": 1}], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out", "none", "default_to_black"]]}}, "input_order": {"required": ["points_string", "invert", "frames", "width", "height", "interpolation"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "CreateFadeMaskAdvanced", "display_name": "Create Fade Mask Advanced", "description": "\nCreate a batch of masks interpolated between given frames and values. 
\nUses same syntax as Fizz' BatchValueSchedule.\nFirst value is the frame index (note that this starts from 0, not 1) \nand the second value inside the brackets is the float value of the mask in range 0.0 - 1.0 \n\nFor example the default values: \n0:(0.0) \n7:(1.0) \n15:(0.0) \n \nWould create a mask batch of 16 frames, starting from black, \ninterpolating with the chosen curve to fully white at the 8th frame, \nand interpolating from that to fully black at the 16th frame.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateFluidMask": {"input": {"required": {"invert": ["BOOLEAN", {"default": false}], "frames": ["INT", {"default": 1, "min": 1, "max": 4096, "step": 1}], "width": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 256, "min": 16, "max": 4096, "step": 1}], "inflow_count": ["INT", {"default": 3, "min": 0, "max": 255, "step": 1}], "inflow_velocity": ["INT", {"default": 1, "min": 0, "max": 255, "step": 1}], "inflow_radius": ["INT", {"default": 8, "min": 0, "max": 255, "step": 1}], "inflow_padding": ["INT", {"default": 50, "min": 0, "max": 255, "step": 1}], "inflow_duration": ["INT", {"default": 60, "min": 0, "max": 255, "step": 1}]}}, "input_order": {"required": ["invert", "frames", "width", "height", "inflow_count", "inflow_velocity", "inflow_radius", "inflow_padding", "inflow_duration"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "CreateFluidMask", "display_name": "Create Fluid Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateShapeMask": {"input": {"required": {"shape": [["circle", "square", "triangle"], {"default": "circle"}], "frames": ["INT", 
{"default": 1, "min": 1, "max": 4096, "step": 1}], "location_x": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "location_y": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "grow": ["INT", {"default": 0, "min": -512, "max": 512, "step": 1}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "shape_width": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 1}], "shape_height": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 1}]}}, "input_order": {"required": ["shape", "frames", "location_x", "location_y", "grow", "frame_width", "frame_height", "shape_width", "shape_height"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": [false, false], "output_name": ["mask", "mask_inverted"], "name": "CreateShapeMask", "display_name": "Create Shape Mask", "description": "\nCreates a mask or batch of masks with the specified shape. \nLocations are center locations. 
\nGrow value is the amount to grow the shape on each frame, creating animated masks.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateVoronoiMask": {"input": {"required": {"frames": ["INT", {"default": 16, "min": 2, "max": 4096, "step": 1}], "num_points": ["INT", {"default": 15, "min": 1, "max": 4096, "step": 1}], "line_width": ["INT", {"default": 4, "min": 1, "max": 4096, "step": 1}], "speed": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}]}}, "input_order": {"required": ["frames", "num_points", "line_width", "speed", "frame_width", "frame_height"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": [false, false], "output_name": ["mask", "mask_inverted"], "name": "CreateVoronoiMask", "display_name": "Create Voronoi Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateMagicMask": {"input": {"required": {"frames": ["INT", {"default": 16, "min": 2, "max": 4096, "step": 1}], "depth": ["INT", {"default": 12, "min": 1, "max": 500, "step": 1}], "distortion": ["FLOAT", {"default": 1.5, "min": 0.0, "max": 100.0, "step": 0.01}], "seed": ["INT", {"default": 123, "min": 0, "max": 99999999, "step": 1}], "transitions": ["INT", {"default": 1, "min": 1, "max": 20, "step": 1}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}]}}, "input_order": {"required": ["frames", "depth", "distortion", "seed", "transitions", "frame_width", "frame_height"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": 
[false, false], "output_name": ["mask", "mask_inverted"], "name": "CreateMagicMask", "display_name": "Create Magic Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetMaskSizeAndCount": {"input": {"required": {"mask": ["MASK"]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["MASK", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["mask", "width", "height", "count"], "name": "GetMaskSizeAndCount", "display_name": "Get Mask Size & Count", "description": "\nReturns the width, height and batch size of the mask, \nand passes it through unchanged. \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GrowMaskWithBlur": {"input": {"required": {"mask": ["MASK"], "expand": ["INT", {"default": 0, "min": -16384, "max": 16384, "step": 1}], "incremental_expandrate": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}], "tapered_corners": ["BOOLEAN", {"default": true}], "flip_input": ["BOOLEAN", {"default": false}], "blur_radius": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}], "lerp_alpha": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "decay_factor": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"fill_holes": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["mask", "expand", "incremental_expandrate", "tapered_corners", "flip_input", "blur_radius", "lerp_alpha", "decay_factor"], "optional": ["fill_holes"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": [false, false], "output_name": ["mask", "mask_inverted"], "name": "GrowMaskWithBlur", "display_name": "Grow Mask With Blur", "description": "\n# GrowMaskWithBlur\n- mask: Input mask or 
mask batch\n- expand: Expand or contract mask or mask batch by a given amount\n- incremental_expandrate: increase expand rate by a given amount per frame\n- tapered_corners: use tapered corners\n- flip_input: flip input mask\n- blur_radius: value higher than 0 will blur the mask\n- lerp_alpha: alpha value for interpolation between frames\n- decay_factor: decay value for interpolation between frames\n- fill_holes: fill holes in the mask (slow)", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskBatchMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "mask_1": ["MASK"], "mask_2": ["MASK"]}}, "input_order": {"required": ["inputcount", "mask_1", "mask_2"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["masks"], "name": "MaskBatchMulti", "display_name": "Mask Batch Multi", "description": "\nCreates an image batch from multiple masks. 
\nYou can set how many inputs the node has, \nwith the **inputcount** and clicking update.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "OffsetMask": {"input": {"required": {"mask": ["MASK"], "x": ["INT", {"default": 0, "min": -4096, "max": 16384, "step": 1, "display": "number"}], "y": ["INT", {"default": 0, "min": -4096, "max": 16384, "step": 1, "display": "number"}], "angle": ["INT", {"default": 0, "min": -360, "max": 360, "step": 1, "display": "number"}], "duplication_factor": ["INT", {"default": 1, "min": 1, "max": 1000, "step": 1, "display": "number"}], "roll": ["BOOLEAN", {"default": false}], "incremental": ["BOOLEAN", {"default": false}], "padding_mode": [["empty", "border", "reflection"], {"default": "empty"}]}}, "input_order": {"required": ["mask", "x", "y", "angle", "duplication_factor", "roll", "incremental", "padding_mode"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["mask"], "name": "OffsetMask", "display_name": "Offset Mask", "description": "\nOffsets the mask by the specified amount. 
\n - mask: Input mask or mask batch\n - x: Horizontal offset\n - y: Vertical offset\n - angle: Angle in degrees\n - roll: roll edge wrapping\n - duplication_factor: Number of times to duplicate the mask to form a batch\n - border padding_mode: Padding mode for the mask\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RemapMaskRange": {"input": {"required": {"mask": ["MASK"], "min": ["FLOAT", {"default": 0.0, "min": -10.0, "max": 1.0, "step": 0.01}], "max": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["mask", "min", "max"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["mask"], "name": "RemapMaskRange", "display_name": "Remap Mask Range", "description": "\nSets new min and max values for the mask.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeMask": {"input": {"required": {"mask": ["MASK"], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1, "display": "number"}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1, "display": "number"}], "keep_proportions": ["BOOLEAN", {"default": false}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "crop": [["disabled", "center"]]}}, "input_order": {"required": ["mask", "width", "height", "keep_proportions", "upscale_method", "crop"]}, "is_input_list": false, "output": ["MASK", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["mask", "width", "height"], "name": "ResizeMask", "display_name": "Resize Mask", "description": "\nResizes the mask or batch of masks to the specified width and height.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "RoundMask": {"input": {"required": {"mask": ["MASK"]}}, "input_order": {"required": ["mask"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "RoundMask", "display_name": "Round Mask", "description": "\nRounds the mask or batch of masks to a binary mask. \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SeparateMasks": {"input": {"required": {"mask": ["MASK"], "size_threshold_width": ["INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}], "size_threshold_height": ["INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}], "mode": [["convex_polygons", "area", "box"]], "max_poly_points": ["INT", {"default": 8, "min": 3, "max": 32, "step": 1}]}}, "input_order": {"required": ["mask", "size_threshold_width", "size_threshold_height", "mode", "max_poly_points"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["mask"], "name": "SeparateMasks", "display_name": "Separate Masks", "description": "Separates a mask into multiple masks based on the size of the connected components.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "AddLabel": {"input": {"required": {"image": ["IMAGE"], "text_x": ["INT", {"default": 10, "min": 0, "max": 4096, "step": 1}], "text_y": ["INT", {"default": 2, "min": 0, "max": 4096, "step": 1}], "height": ["INT", {"default": 48, "min": -1, "max": 4096, "step": 1}], "font_size": ["INT", {"default": 32, "min": 0, "max": 4096, "step": 1}], "font_color": ["STRING", {"default": "white"}], "label_color": ["STRING", {"default": "black"}], "font": [["FreeMono.ttf", "FreeMonoBoldOblique.otf", "TTNorms-Black.otf"]], "text": ["STRING", {"default": "Text"}], 
"direction": [["up", "down", "left", "right", "overlay"], {"default": "up"}]}, "optional": {"caption": ["STRING", {"default": "", "forceInput": true}]}}, "input_order": {"required": ["image", "text_x", "text_y", "height", "font_size", "font_color", "label_color", "font", "text", "direction"], "optional": ["caption"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "AddLabel", "display_name": "Add Label", "description": "\nCreates a new image with the given text, and concatenates it to \neither above or below the input image. \nNote that this changes the input image's height! \nFonts are loaded from this folder: \nComfyUI/custom_nodes/ComfyUI-KJNodes/fonts\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ColorMatch": {"input": {"required": {"image_ref": ["IMAGE"], "image_target": ["IMAGE"], "method": [["mkl", "hm", "reinhard", "mvgd", "hm-mvgd-hm", "hm-mkl-hm"], {"default": "mkl"}]}, "optional": {"strength": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["image_ref", "image_target", "method"], "optional": ["strength"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "ColorMatch", "display_name": "Color Match", "description": "\ncolor-matcher enables color transfer across images which comes in handy for automatic \ncolor-grading of photographs, paintings and film sequences as well as light-field \nand stopmotion corrections. \n\nThe methods behind the mappings are based on the approach from Reinhard et al., \nthe Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution \nto a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram \nmatching. As shown below our HM-MVGD-HM compound outperforms existing methods. 
\nhttps://github.com/hahnec/color-matcher/\n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageTensorList": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"]}}, "input_order": {"required": ["image1", "image2"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageTensorList", "display_name": "Image Tensor List", "description": "\nCreates an image list from the input images.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CrossFadeImages": {"input": {"required": {"images_1": ["IMAGE"], "images_2": ["IMAGE"], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"]], "transition_start_index": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}], "transitioning_frames": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}], "start_level": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "end_level": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["images_1", "images_2", "interpolation", "transition_start_index", "transitioning_frames", "start_level", "end_level"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "CrossFadeImages", "display_name": "Cross Fade Images", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CrossFadeImagesMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "image_1": ["IMAGE"], "image_2": ["IMAGE"], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out", 
"bounce", "elastic", "glitchy", "exponential_ease_out"]], "transitioning_frames": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}]}}, "input_order": {"required": ["inputcount", "image_1", "image_2", "interpolation", "transitioning_frames"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "CrossFadeImagesMulti", "display_name": "Cross Fade Images Multi", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetImagesFromBatchIndexed": {"input": {"required": {"images": ["IMAGE"], "indexes": ["STRING", {"default": "0, 1, 2", "multiline": true}]}}, "input_order": {"required": ["images", "indexes"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "GetImagesFromBatchIndexed", "display_name": "Get Images From Batch Indexed", "description": "\nSelects and returns the images at the specified indices as an image batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetImageRangeFromBatch": {"input": {"required": {"start_index": ["INT", {"default": 0, "min": -1, "max": 4096, "step": 1}], "num_frames": ["INT", {"default": 1, "min": 1, "max": 4096, "step": 1}]}, "optional": {"images": ["IMAGE"], "masks": ["MASK"]}}, "input_order": {"required": ["start_index", "num_frames"], "optional": ["images", "masks"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "GetImageRangeFromBatch", "display_name": "Get Image or Mask Range From Batch", "description": "\nReturns a range of images from a batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, 
"search_aliases": []}, "GetLatentRangeFromBatch": {"input": {"required": {"latents": ["LATENT"], "start_index": ["INT", {"default": 0, "min": -1, "max": 4096, "step": 1}], "num_frames": ["INT", {"default": 1, "min": -1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["latents", "start_index", "num_frames"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "GetLatentRangeFromBatch", "display_name": "Get Latent Range From Batch", "description": "\nReturns a range of latents from a batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/latents", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetImageSizeAndCount": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT", "INT"], "output_is_list": [false, false, false, false], "output_name": ["image", "width", "height", "count"], "name": "GetImageSizeAndCount", "display_name": "Get Image Size & Count", "description": "\nReturns width, height and batch size of the image, \nand passes it through unchanged. 
\n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FastPreview": {"input": {"required": {"image": ["IMAGE"], "format": [["JPEG", "PNG", "WEBP"], {"default": "JPEG"}], "quality": ["INT", {"default": 75, "min": 1, "max": 100, "step": 1}]}}, "input_order": {"required": ["image", "format", "quality"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "FastPreview", "display_name": "Fast Preview", "description": "Experimental node for faster image previews by displaying it through base64 without saving to disk.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ImageBatchFilter": {"input": {"required": {"images": ["IMAGE"], "empty_color": ["STRING", {"default": "0, 0, 0"}], "empty_threshold": ["FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.01}]}, "optional": {"replacement_image": ["IMAGE"]}}, "input_order": {"required": ["images", "empty_color", "empty_threshold"], "optional": ["replacement_image"]}, "is_input_list": false, "output": ["IMAGE", "STRING"], "output_is_list": [false, false], "output_name": ["images", "removed_indices"], "name": "ImageBatchFilter", "display_name": "Image Batch Filter", "description": "Removes empty images from a batch", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageAndMaskPreview": {"input": {"required": {"mask_opacity": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "mask_color": ["STRING", {"default": "255, 255, 255"}], "pass_through": ["BOOLEAN", {"default": false}]}, "optional": {"image": ["IMAGE"], "mask": ["MASK"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["mask_opacity", 
"mask_color", "pass_through"], "optional": ["image", "mask"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["composite"], "name": "ImageAndMaskPreview", "display_name": "ImageAndMaskPreview", "description": "\nPreview an image or a mask, when both inputs are used \ncomposites the mask on top of the image.\nwith pass_through on the preview is disabled and the \ncomposite is returned from the composite slot instead, \nthis allows for the preview to be passed for video combine \nnodes for example.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": true, "has_intermediate_output": false, "search_aliases": ["save", "save image", "export image", "output image", "write image", "download"], "essentials_category": "Basics"}, "ImageAddMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "image_1": ["IMAGE"], "image_2": ["IMAGE"], "blending": [["add", "subtract", "multiply", "difference"], {"default": "add"}], "blend_amount": ["FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}]}}, "input_order": {"required": ["inputcount", "image_1", "image_2", "blending", "blend_amount"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "ImageAddMulti", "display_name": "Image Add Multi", "description": "\nAdd blends multiple images together. 
\nYou can set how many inputs the node has, \nwith the **inputcount** and clicking update.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageBatchMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "image_1": ["IMAGE"], "image_2": ["IMAGE"]}}, "input_order": {"required": ["inputcount", "image_1", "image_2"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "ImageBatchMulti", "display_name": "Image Batch Multi", "description": "\nCreates an image batch from multiple images. \nYou can set how many inputs the node has, \nwith the **inputcount** and clicking update.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageBatchRepeatInterleaving": {"input": {"required": {"images": ["IMAGE"], "repeats": ["INT", {"default": 1, "min": 1, "max": 4096}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["images", "repeats"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImageBatchRepeatInterleaving", "display_name": "ImageBatchRepeatInterleaving", "description": "\nRepeats each image in a batch by the specified number of times. 
\nExample batch of 5 images: 0, 1 ,2, 3, 4 \nwith repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageBatchTestPattern": {"input": {"required": {"batch_size": ["INT", {"default": 1, "min": 1, "max": 255, "step": 1}], "start_from": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "text_x": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "text_y": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "font": [["FreeMono.ttf", "FreeMonoBoldOblique.otf", "TTNorms-Black.otf"]], "font_size": ["INT", {"default": 255, "min": 8, "max": 4096, "step": 1}]}}, "input_order": {"required": ["batch_size", "start_from", "text_x", "text_y", "width", "height", "font", "font_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageBatchTestPattern", "display_name": "Image Batch Test Pattern", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageConcanate": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"], "direction": [["right", "down", "left", "up"], {"default": "right"}], "match_image_size": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image1", "image2", "direction", "match_image_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageConcanate", "display_name": "Image Concatenate", "description": "\nConcatenates the image2 to image1 in the specified direction.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageConcatFromBatch": {"input": {"required": {"images": ["IMAGE"], "num_columns": ["INT", {"default": 3, "min": 1, "max": 255, "step": 1}], "match_image_size": ["BOOLEAN", {"default": false}], "max_resolution": ["INT", {"default": 4096}]}}, "input_order": {"required": ["images", "num_columns", "match_image_size", "max_resolution"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageConcatFromBatch", "display_name": "Image Concatenate From Batch", "description": "\n Concatenates images from a batch into a grid with a specified number of columns.\n ", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageConcatMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "image_1": ["IMAGE"], "image_2": ["IMAGE"], "direction": [["right", "down", "left", "up"], {"default": "right"}], "match_image_size": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["inputcount", "image_1", "image_2", "direction", "match_image_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "ImageConcatMulti", "display_name": "Image Concatenate Multi", "description": "\nCreates an image from multiple images. 
\nYou can set how many inputs the node has, \nwith the **inputcount** and clicking update.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageCropByMask": {"input": {"required": {"image": ["IMAGE"], "mask": ["MASK"]}}, "input_order": {"required": ["image", "mask"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "ImageCropByMask", "display_name": "Image Crop By Mask", "description": "Crops the input images based on the provided mask.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageCropByMaskAndResize": {"input": {"required": {"image": ["IMAGE"], "mask": ["MASK"], "base_resolution": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "padding": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "min_crop_resolution": ["INT", {"default": 128, "min": 0, "max": 16384, "step": 8}], "max_crop_resolution": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["image", "mask", "base_resolution", "padding", "min_crop_resolution", "max_crop_resolution"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "BBOX"], "output_is_list": [false, false, false], "output_name": ["images", "masks", "bbox"], "name": "ImageCropByMaskAndResize", "display_name": "Image Crop By Mask And Resize", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageCropByMaskBatch": {"input": {"required": {"image": ["IMAGE"], "masks": ["MASK"], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "padding": ["INT", {"default": 0, "min": 0, "max": 
4096, "step": 1}], "preserve_size": ["BOOLEAN", {"default": false}], "bg_color": ["STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}]}}, "input_order": {"required": ["image", "masks", "width", "height", "padding", "preserve_size", "bg_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["images", "masks"], "name": "ImageCropByMaskBatch", "display_name": "Image Crop By Mask Batch", "description": "Crops the input images based on the provided masks.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageUncropByMask": {"input": {"required": {"destination": ["IMAGE"], "source": ["IMAGE"], "mask": ["MASK"], "bbox": ["BBOX"]}}, "input_order": {"required": ["destination", "source", "mask", "bbox"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "ImageUncropByMask", "display_name": "Image Uncrop By Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageGrabPIL": {"input": {"required": {"x": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "width": ["INT", {"default": 512, "min": 0, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 4096, "step": 1}], "num_frames": ["INT", {"default": 1, "min": 1, "max": 255, "step": 1}], "delay": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["x", "y", "width", "height", "num_frames", "delay"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "ImageGrabPIL", "display_name": "Image Grab PIL", "description": "\nCaptures an 
area specified by screen coordinates. \nCan be used for realtime diffusion with autoqueue.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageGridComposite2x2": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"], "image4": ["IMAGE"]}}, "input_order": {"required": ["image1", "image2", "image3", "image4"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageGridComposite2x2", "display_name": "Image Grid Composite 2x2", "description": "\nConcatenates the 4 input images into a 2x2 grid. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageGridComposite3x3": {"input": {"required": {"image1": ["IMAGE"], "image2": ["IMAGE"], "image3": ["IMAGE"], "image4": ["IMAGE"], "image5": ["IMAGE"], "image6": ["IMAGE"], "image7": ["IMAGE"], "image8": ["IMAGE"], "image9": ["IMAGE"]}}, "input_order": {"required": ["image1", "image2", "image3", "image4", "image5", "image6", "image7", "image8", "image9"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageGridComposite3x3", "display_name": "Image Grid Composite 3x3", "description": "\nConcatenates the 9 input images into a 3x3 grid. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageGridtoBatch": {"input": {"required": {"image": ["IMAGE"], "columns": ["INT", {"default": 3, "min": 1, "max": 8, "tooltip": "The number of columns in the grid."}], "rows": ["INT", {"default": 0, "min": 0, "max": 8, "tooltip": "The number of rows in the grid. 
Set to 0 for automatic calculation."}]}}, "input_order": {"required": ["image", "columns", "rows"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageGridtoBatch", "display_name": "Image Grid To Batch", "description": "Converts a grid of images to a batch of images.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageNoiseAugmentation": {"input": {"required": {"image": ["IMAGE"], "noise_aug_strength": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.001}], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}]}}, "input_order": {"required": ["image", "noise_aug_strength", "seed"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageNoiseAugmentation", "display_name": "Image Noise Augmentation", "description": "\n Add noise to an image. 
\n ", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageNormalize_Neg1_To_1": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageNormalize_Neg1_To_1", "display_name": "Image Normalize -1 to 1", "description": "\nNormalize the images to be in the range [-1, 1] \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImagePass": {"input": {"required": {}, "optional": {"image": ["IMAGE"]}}, "input_order": {"required": [], "optional": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImagePass", "display_name": "ImagePass", "description": "\nPasses the image through without modifying it.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImagePadKJ": {"input": {"required": {"image": ["IMAGE"], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "extra_padding": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "pad_mode": [["edge", "color"]], "color": ["STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}]}, "optional": {"mask": ["MASK"], "target_width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1, "forceInput": true}], "target_height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1, "forceInput": true}]}}, "input_order": 
{"required": ["image", "left", "right", "top", "bottom", "extra_padding", "pad_mode", "color"], "optional": ["mask", "target_width", "target_height"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["images", "masks"], "name": "ImagePadKJ", "display_name": "ImagePad KJ", "description": "Pad the input image and optionally mask with the specified padding.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImagePadForOutpaintMasked": {"input": {"required": {"image": ["IMAGE"], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "feathering": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["image", "left", "top", "right", "bottom", "feathering"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImagePadForOutpaintMasked", "display_name": "Image Pad For Outpaint Masked", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImagePadForOutpaintTargetSize": {"input": {"required": {"image": ["IMAGE"], "target_width": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "target_height": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "feathering": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]]}, "optional": {"mask": ["MASK"]}}, "input_order": {"required": ["image", "target_width", 
"target_height", "feathering", "upscale_method"], "optional": ["mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImagePadForOutpaintTargetSize", "display_name": "Image Pad For Outpaint Target Size", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImagePrepForICLora": {"input": {"required": {"reference_image": ["IMAGE"], "output_width": ["INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}], "output_height": ["INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}], "border_width": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}]}, "optional": {"latent_image": ["IMAGE"], "latent_mask": ["MASK"], "reference_mask": ["MASK"]}}, "input_order": {"required": ["reference_image", "output_width", "output_height", "border_width"], "optional": ["latent_image", "latent_mask", "reference_mask"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ImagePrepForICLora", "display_name": "Image Prep For ICLora", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageResizeKJ": {"input": {"required": {"image": ["IMAGE"], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "keep_proportion": ["BOOLEAN", {"default": false}], "divisible_by": ["INT", {"default": 2, "min": 0, "max": 512, "step": 1}]}, "optional": {"get_image_size": ["IMAGE"], "crop": [["disabled", "center", 0], {"tooltip": "0 will do the default center crop, this is a workaround for the widget order changing with the new frontend, as 
in old workflows the value of this widget becomes 0 automatically"}]}}, "input_order": {"required": ["image", "width", "height", "upscale_method", "keep_proportion", "divisible_by"], "optional": ["get_image_size", "crop"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "width", "height"], "name": "ImageResizeKJ", "display_name": "Resize Image (deprecated)", "description": "\nDEPRECATED!\n\nDue to ComfyUI frontend changes, this node should no longer be used, please check the \nv2 of the node. This node is only kept to not completely break older workflows. \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "ImageResizeKJv2": {"input": {"required": {"image": ["IMAGE"], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 1}], "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]], "keep_proportion": [["stretch", "resize", "pad", "pad_edge", "crop"], {"default": "stretch"}], "pad_color": ["STRING", {"default": "0, 0, 0", "tooltip": "Color to use for padding."}], "crop_position": [["center", "top", "bottom", "left", "right"], {"default": "center"}], "divisible_by": ["INT", {"default": 2, "min": 0, "max": 512, "step": 1}]}, "optional": {"mask": ["MASK"], "device": [["cpu", "gpu"]]}, "hidden": {"unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["image", "width", "height", "upscale_method", "keep_proportion", "pad_color", "crop_position", "divisible_by"], "optional": ["mask", "device"], "hidden": ["unique_id"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT", "MASK"], "output_is_list": [false, false, false, false], "output_name": ["IMAGE", "width", "height", "mask"], "name": "ImageResizeKJv2", "display_name": "Resize Image v2", "description": 
"\nResizes the image to the specified width and height. \nSize can be retrieved from the input.\n\nKeep proportions keeps the aspect ratio of the image, by \nhighest dimension. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageUpscaleWithModelBatched": {"input": {"required": {"upscale_model": ["UPSCALE_MODEL"], "images": ["IMAGE"], "per_batch": ["INT", {"default": 16, "min": 1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["upscale_model", "images", "per_batch"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageUpscaleWithModelBatched", "display_name": "Image Upscale With Model Batched", "description": "\nSame as ComfyUI native model upscaling node, \nbut allows setting sub-batches for reduced VRAM usage.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InsertImagesToBatchIndexed": {"input": {"required": {"original_images": ["IMAGE"], "images_to_insert": ["IMAGE"], "indexes": ["STRING", {"default": "0, 1, 2", "multiline": true}]}, "optional": {"mode": [["replace", "insert"]]}}, "input_order": {"required": ["original_images", "images_to_insert", "indexes"], "optional": ["mode"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "InsertImagesToBatchIndexed", "display_name": "Insert Images To Batch Indexed", "description": "\nInserts images at the specified indices into the original image batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InsertLatentToIndexed": {"input": {"required": {"source": ["LATENT"], "destination": ["LATENT"], "index": ["INT", {"default": 0, "min": -1, "max": 4096, "step": 
1}]}}, "input_order": {"required": ["source", "destination", "index"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "InsertLatentToIndexed", "display_name": "Insert Latent To Index", "description": "\nInserts a latent at the specified index into the original latent batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/latents", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoadAndResizeImage": {"input": {"required": {"image": [["2.png", "RunComFy_examples_1384_1.png", "RunComfy_examples_1384_1.png", "RunComfy_examples_1386_1.jpg", "RunComfy_examples_1386_2.jpg", "RunComfy_examples_1386_3.jpg", "RunComfy_examples_1386_4.jpg", "RunComfy_examples_1386_5.jpg", "Runcomfy_example_1277.png", "example.png", "ref.jpg"], {"image_upload": true}], "resize": ["BOOLEAN", {"default": false}], "width": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "height": ["INT", {"default": 512, "min": 0, "max": 16384, "step": 8}], "repeat": ["INT", {"default": 1, "min": 1, "max": 4096, "step": 1}], "keep_proportion": ["BOOLEAN", {"default": false}], "divisible_by": ["INT", {"default": 2, "min": 0, "max": 512, "step": 1}], "mask_channel": [["alpha", "red", "green", "blue"], {"tooltip": "Channel to use for the mask output"}], "background_color": ["STRING", {"default": "", "tooltip": "Fills the alpha channel with the specified color."}]}}, "input_order": {"required": ["image", "resize", "width", "height", "repeat", "keep_proportion", "divisible_by", "mask_channel", "background_color"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "INT", "STRING"], "output_is_list": [false, false, false, false, false], "output_name": ["image", "mask", "width", "height", "image_path"], "name": "LoadAndResizeImage", "display_name": "Load & Resize Image", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoadImagesFromFolderKJ": {"input": {"required": {"folder": ["STRING", {"default": ""}], "width": ["INT", {"default": 1024, "min": -1, "step": 1}], "height": ["INT", {"default": 1024, "min": -1, "step": 1}], "keep_aspect_ratio": [["crop", "pad", "stretch"]]}, "optional": {"image_load_cap": ["INT", {"default": 0, "min": 0, "step": 1}], "start_index": ["INT", {"default": 0, "min": 0, "step": 1}], "include_subfolders": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["folder", "width", "height", "keep_aspect_ratio"], "optional": ["image_load_cap", "start_index", "include_subfolders"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "INT", "STRING"], "output_is_list": [false, false, false, false], "output_name": ["image", "mask", "count", "image_path"], "name": "LoadImagesFromFolderKJ", "display_name": "Load Images From Folder (KJ)", "description": "Loads images from a folder into a batch, images are resized and loaded into a batch.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MergeImageChannels": {"input": {"required": {"red": ["IMAGE"], "green": ["IMAGE"], "blue": ["IMAGE"]}, "optional": {"alpha": ["MASK", {"default": null}]}}, "input_order": {"required": ["red", "green", "blue"], "optional": ["alpha"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "MergeImageChannels", "display_name": "Merge Image Channels", "description": "\nMerges channel data into an image. 
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PadImageBatchInterleaved": {"input": {"required": {"images": ["IMAGE"], "empty_frames_per_image": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}], "pad_frame_value": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "add_after_last": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["images", "empty_frames_per_image", "pad_frame_value", "add_after_last"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["images", "masks"], "name": "PadImageBatchInterleaved", "display_name": "Pad Image Batch Interleaved", "description": "\nInserts empty frames between the images in a batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PreviewAnimation": {"input": {"required": {"fps": ["FLOAT", {"default": 8.0, "min": 0.01, "max": 1000.0, "step": 0.01}]}, "optional": {"images": ["IMAGE"], "masks": ["MASK"]}}, "input_order": {"required": ["fps"], "optional": ["images", "masks"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "PreviewAnimation", "display_name": "Preview Animation", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "RemapImageRange": {"input": {"required": {"image": ["IMAGE"], "min": ["FLOAT", {"default": 0.0, "min": -10.0, "max": 1.0, "step": 0.01}], "max": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "clamp": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["image", "min", "max", "clamp"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": 
"RemapImageRange", "display_name": "Remap Image Range", "description": "\nRemaps the image values to the specified range. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ReverseImageBatch": {"input": {"required": {"images": ["IMAGE"]}}, "input_order": {"required": ["images"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ReverseImageBatch", "display_name": "Reverse Image Batch", "description": "\nReverses the order of the images in a batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ReplaceImagesInBatch": {"input": {"required": {"start_index": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}]}, "optional": {"original_images": ["IMAGE"], "replacement_images": ["IMAGE"], "original_masks": ["MASK"], "replacement_masks": ["MASK"]}}, "input_order": {"required": ["start_index"], "optional": ["original_images", "replacement_images", "original_masks", "replacement_masks"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["IMAGE", "MASK"], "name": "ReplaceImagesInBatch", "display_name": "Replace Images In Batch", "description": "\nReplaces the images in a batch, starting from the specified start index, \nwith the replacement images.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveImageWithAlpha": {"input": {"required": {"images": ["IMAGE"], "mask": ["MASK"], "filename_prefix": ["STRING", {"default": "ComfyUI"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "mask", "filename_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": 
false, "output": [], "output_is_list": [], "output_name": [], "name": "SaveImageWithAlpha", "display_name": "Save Image With Alpha", "description": "\nSaves an image and mask as .PNG with the mask as the alpha channel. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SaveImageKJ": {"input": {"required": {"images": ["IMAGE", {"tooltip": "The images to save."}], "filename_prefix": ["STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}], "output_folder": ["STRING", {"default": "output", "tooltip": "The folder to save the images to."}]}, "optional": {"caption_file_extension": ["STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}], "caption": ["STRING", {"forceInput": true, "tooltip": "string to save as .txt file"}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "output_folder"], "optional": ["caption_file_extension", "caption"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["filename"], "name": "SaveImageKJ", "display_name": "Save Image KJ", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ShuffleImageBatch": {"input": {"required": {"images": ["IMAGE"], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}]}}, "input_order": {"required": ["images", "seed"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ShuffleImageBatch", "display_name": "Shuffle Image 
Batch", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SplitImageChannels": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "IMAGE", "MASK"], "output_is_list": [false, false, false, false], "output_name": ["red", "green", "blue", "mask"], "name": "SplitImageChannels", "display_name": "Split Image Channels", "description": "\nSplits image channels into images where the selected channel \nis repeated for all channels, and the alpha as a mask. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TransitionImagesMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "image_1": ["IMAGE"], "image_2": ["IMAGE"], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"]], "transition_type": [["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"]], "transitioning_frames": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}], "blur_radius": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}], "reverse": ["BOOLEAN", {"default": false}], "device": [["CPU", "GPU"], {"default": "CPU"}]}}, "input_order": {"required": ["inputcount", "image_1", "image_2", "interpolation", "transition_type", "transitioning_frames", "blur_radius", "reverse", "device"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "TransitionImagesMulti", "display_name": "Transition Images Multi", "description": "\nCreates transitions between images.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "TransitionImagesInBatch": {"input": {"required": {"images": ["IMAGE"], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"]], "transition_type": [["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"]], "transitioning_frames": ["INT", {"default": 1, "min": 0, "max": 4096, "step": 1}], "blur_radius": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}], "reverse": ["BOOLEAN", {"default": false}], "device": [["CPU", "GPU"], {"default": "CPU"}]}}, "input_order": {"required": ["images", "interpolation", "transition_type", "transitioning_frames", "blur_radius", "reverse", "device"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "TransitionImagesInBatch", "display_name": "Transition Images In Batch", "description": "\nCreates transitions between images in a batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BatchCropFromMask": {"input": {"required": {"original_images": ["IMAGE"], "masks": ["MASK"], "crop_size_mult": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}], "bbox_smooth_alpha": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["original_images", "masks", "crop_size_mult", "bbox_smooth_alpha"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "BBOX", "INT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["original_images", "cropped_images", "bboxes", "width", "height"], "name": "BatchCropFromMask", "display_name": "Batch Crop From Mask", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, 
"search_aliases": []}, "BatchCropFromMaskAdvanced": {"input": {"required": {"original_images": ["IMAGE"], "masks": ["MASK"], "crop_size_mult": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "bbox_smooth_alpha": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["original_images", "masks", "crop_size_mult", "bbox_smooth_alpha"]}, "is_input_list": false, "output": ["IMAGE", "IMAGE", "MASK", "IMAGE", "MASK", "BBOX", "BBOX", "INT", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["original_images", "cropped_images", "cropped_masks", "combined_crop_image", "combined_crop_masks", "bboxes", "combined_bounding_box", "bbox_width", "bbox_height"], "name": "BatchCropFromMaskAdvanced", "display_name": "Batch Crop From Mask Advanced", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FilterZeroMasksAndCorrespondingImages": {"input": {"required": {"masks": ["MASK"]}, "optional": {"original_images": ["IMAGE"]}}, "input_order": {"required": ["masks"], "optional": ["original_images"]}, "is_input_list": false, "output": ["MASK", "IMAGE", "IMAGE", "INDEXES"], "output_is_list": [false, false, false, false], "output_name": ["non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes"], "name": "FilterZeroMasksAndCorrespondingImages", "display_name": "FilterZeroMasksAndCorrespondingImages", "description": "\nFilter out all the empty (i.e. 
all zero) mask in masks \nAlso filter out all the corresponding images in original_images by indexes if provided \n \noriginal_images (optional): If provided, must have the same length as masks.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InsertImageBatchByIndexes": {"input": {"required": {"images": ["IMAGE"], "images_to_insert": ["IMAGE"], "insert_indexes": ["INDEXES"]}}, "input_order": {"required": ["images", "images_to_insert", "insert_indexes"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images_after_insert"], "name": "InsertImageBatchByIndexes", "display_name": "Insert Image Batch By Indexes", "description": "\nThis node is designed to be used with node FilterZeroMasksAndCorrespondingImages\nIt inserts the images_to_insert into images according to insert_indexes\n\nReturns:\n images_after_insert: updated original images with original sequence order\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BatchUncrop": {"input": {"required": {"original_images": ["IMAGE"], "cropped_images": ["IMAGE"], "bboxes": ["BBOX"], "border_blending": ["FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}], "crop_rescale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "border_top": ["BOOLEAN", {"default": true}], "border_bottom": ["BOOLEAN", {"default": true}], "border_left": ["BOOLEAN", {"default": true}], "border_right": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["original_images", "cropped_images", "bboxes", "border_blending", "crop_rescale", "border_top", "border_bottom", "border_left", "border_right"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "BatchUncrop", "display_name": "Batch Uncrop", 
"description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BatchUncropAdvanced": {"input": {"required": {"original_images": ["IMAGE"], "cropped_images": ["IMAGE"], "cropped_masks": ["MASK"], "combined_crop_mask": ["MASK"], "bboxes": ["BBOX"], "border_blending": ["FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}], "crop_rescale": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "use_combined_mask": ["BOOLEAN", {"default": false}], "use_square_mask": ["BOOLEAN", {"default": true}]}, "optional": {"combined_bounding_box": ["BBOX", {"default": null}]}}, "input_order": {"required": ["original_images", "cropped_images", "cropped_masks", "combined_crop_mask", "bboxes", "border_blending", "crop_rescale", "use_combined_mask", "use_square_mask"], "optional": ["combined_bounding_box"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "BatchUncropAdvanced", "display_name": "Batch Uncrop Advanced", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SplitBboxes": {"input": {"required": {"bboxes": ["BBOX"], "index": ["INT", {"default": 0, "min": 0, "max": 99999999, "step": 1}]}}, "input_order": {"required": ["bboxes", "index"]}, "is_input_list": false, "output": ["BBOX", "BBOX"], "output_is_list": [false, false], "output_name": ["bboxes_a", "bboxes_b"], "name": "SplitBboxes", "display_name": "Split Bboxes", "description": "\nSplits the specified bbox list at the given index into two lists.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BboxToInt": {"input": {"required": {"bboxes": ["BBOX"], "index": ["INT", {"default": 0, "min": 0, 
"max": 99999999, "step": 1}]}}, "input_order": {"required": ["bboxes", "index"]}, "is_input_list": false, "output": ["INT", "INT", "INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false], "output_name": ["x_min", "y_min", "width", "height", "center_x", "center_y"], "name": "BboxToInt", "display_name": "Bbox To Int", "description": "\nReturns selected index from bounding box list as integers.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BboxVisualize": {"input": {"required": {"images": ["IMAGE"], "bboxes": ["BBOX"], "line_width": ["INT", {"default": 1, "min": 1, "max": 10, "step": 1}]}}, "input_order": {"required": ["images", "bboxes", "line_width"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["images"], "name": "BboxVisualize", "display_name": "Bbox Visualize", "description": "\nVisualizes the specified bbox on the image.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GenerateNoise": {"input": {"required": {"width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}], "multiplier": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 4096, "step": 0.01}], "constant_batch_noise": ["BOOLEAN", {"default": false}], "normalize": ["BOOLEAN", {"default": false}]}, "optional": {"model": ["MODEL"], "sigmas": ["SIGMAS"], "latent_channels": [["4", "16"]], "shape": [["BCHW", "BCTHW", "BTCHW"]]}}, "input_order": {"required": ["width", "height", "batch_size", "seed", "multiplier", "constant_batch_noise", "normalize"], "optional": ["model", "sigmas", 
"latent_channels", "shape"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "GenerateNoise", "display_name": "Generate Noise", "description": "\nGenerates noise for injection or to be used as empty latents on samplers with add_noise off.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FlipSigmasAdjusted": {"input": {"required": {"sigmas": ["SIGMAS"], "divide_by_last_sigma": ["BOOLEAN", {"default": false}], "divide_by": ["FLOAT", {"default": 1, "min": 1, "max": 255, "step": 0.01}], "offset_by": ["INT", {"default": 1, "min": -100, "max": 100, "step": 1}]}}, "input_order": {"required": ["sigmas", "divide_by_last_sigma", "divide_by", "offset_by"]}, "is_input_list": false, "output": ["SIGMAS", "STRING"], "output_is_list": [false, false], "output_name": ["SIGMAS", "sigmas_string"], "name": "FlipSigmasAdjusted", "display_name": "Flip Sigmas Adjusted", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InjectNoiseToLatent": {"input": {"required": {"latents": ["LATENT"], "strength": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}], "noise": ["LATENT"], "normalize": ["BOOLEAN", {"default": false}], "average": ["BOOLEAN", {"default": false}]}, "optional": {"mask": ["MASK"], "mix_randn_amount": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}]}}, "input_order": {"required": ["latents", "strength", "noise", "normalize", "average"], "optional": ["mask", "mix_randn_amount", "seed"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "InjectNoiseToLatent", "display_name": "Inject Noise To Latent", 
"description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CustomSigmas": {"input": {"required": {"sigmas_string": ["STRING", {"default": "14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029", "multiline": true}], "interpolate_to_steps": ["INT", {"default": 10, "min": 0, "max": 255, "step": 1}]}}, "input_order": {"required": ["sigmas_string", "interpolate_to_steps"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "name": "CustomSigmas", "display_name": "Custom Sigmas", "description": "\nCreates a sigmas tensor from a string of comma separated values. \nExamples: \n \nNvidia's optimized AYS 10 step schedule for SD 1.5: \n14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029 \nSDXL: \n14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029 \nSVD: \n700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002 \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StringToFloatList": {"input": {"required": {"string": ["STRING", {"default": "1, 2, 3", "multiline": true}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "StringToFloatList", "display_name": "String to Float List", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "WidgetToString": {"input": {"required": {"id": ["INT", {"default": 0, "min": 0, "max": 100000, "step": 1}], "widget_name": ["STRING", {"multiline": false}], "return_all": ["BOOLEAN", {"default": false}]}, "optional": {"any_input": ["*"], "node_title": ["STRING", 
{"multiline": false}], "allowed_float_decimals": ["INT", {"default": 2, "min": 0, "max": 10, "tooltip": "Number of decimal places to display for float values"}]}, "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "prompt": "PROMPT", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["id", "widget_name", "return_all"], "optional": ["any_input", "node_title", "allowed_float_decimals"], "hidden": ["extra_pnginfo", "prompt", "unique_id"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "WidgetToString", "display_name": "Widget To String", "description": "\nSelects a node and its specified widget and outputs the value as a string. \nIf no node id or title is provided it will use the 'any_input' link and use that node. \nTo see node ids, enable node id display from Manager badge menu. \nAlternatively you can search with the node title. Node titles ONLY exist if they \nare manually edited! \nThe 'any_input' is required for making sure the node you want the value from exists in the workflow.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveStringKJ": {"input": {"required": {"string": ["STRING", {"forceInput": true, "tooltip": "string to save as .txt file"}], "filename_prefix": ["STRING", {"default": "text", "tooltip": "The prefix for the file to save. 
This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}], "output_folder": ["STRING", {"default": "output", "tooltip": "The folder to save the images to."}]}, "optional": {"file_extension": ["STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}]}}, "input_order": {"required": ["string", "filename_prefix", "output_folder"], "optional": ["file_extension"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["filename"], "name": "SaveStringKJ", "display_name": "Save String KJ", "description": "Saves the input string to your ComfyUI output directory.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DummyOut": {"input": {"required": {"any_input": ["*"]}}, "input_order": {"required": ["any_input"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "DummyOut", "display_name": "Dummy Out", "description": "\nDoes nothing, used to trigger generic workflow output. 
\nA way to get previews in the UI without saving anything to disk.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GetLatentsFromBatchIndexed": {"input": {"required": {"latents": ["LATENT"], "indexes": ["STRING", {"default": "0, 1, 2", "multiline": true}], "latent_format": [["BCHW", "BTCHW", "BCTHW"], {"default": "BCHW"}]}}, "input_order": {"required": ["latents", "indexes", "latent_format"]}, "is_input_list": false, "output": ["LATENT"], "output_is_list": [false], "output_name": ["LATENT"], "name": "GetLatentsFromBatchIndexed", "display_name": "Get Latents From Batch Indexed", "description": "\nSelects and returns the latents at the specified indices as a latent batch.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/latents", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ScaleBatchPromptSchedule": {"input": {"required": {"input_str": ["STRING", {"forceInput": true, "default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}], "old_frame_count": ["INT", {"forceInput": true, "default": 1, "min": 1, "max": 4096, "step": 1}], "new_frame_count": ["INT", {"forceInput": true, "default": 1, "min": 1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["input_str", "old_frame_count", "new_frame_count"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ScaleBatchPromptSchedule", "display_name": "Scale Batch Prompt Schedule", "description": "\nScales a batch schedule from Fizz' nodes BatchPromptSchedule\nto a different frame count.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CameraPoseVisualizer": {"input": {"required": {"pose_file_path": ["STRING", {"default": "", "multiline": false}], "base_xval": ["FLOAT", {"default": 0.2, "min": 0, 
"max": 100, "step": 0.01}], "zval": ["FLOAT", {"default": 0.3, "min": 0, "max": 100, "step": 0.01}], "scale": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 10.0, "step": 0.01}], "use_exact_fx": ["BOOLEAN", {"default": false}], "relative_c2w": ["BOOLEAN", {"default": true}], "use_viewer": ["BOOLEAN", {"default": false}]}, "optional": {"cameractrl_poses": ["CAMERACTRL_POSES", {"default": null}]}}, "input_order": {"required": ["pose_file_path", "base_xval", "zval", "scale", "use_exact_fx", "relative_c2w", "use_viewer"], "optional": ["cameractrl_poses"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "CameraPoseVisualizer", "display_name": "Camera Pose Visualizer", "description": "\nVisualizes the camera poses, from Animatediff-Evolved CameraCtrl Pose \nor a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AppendStringsToList": {"input": {"required": {"string1": ["STRING", {"default": "", "forceInput": true}], "string2": ["STRING", {"default": "", "forceInput": true}]}}, "input_order": {"required": ["string1", "string2"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "AppendStringsToList", "display_name": "Append Strings To List", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "JoinStrings": {"input": {"required": {"string1": ["STRING", {"default": "", "forceInput": true}], "string2": ["STRING", {"default": "", "forceInput": true}], "delimiter": ["STRING", {"default": " ", "multiline": false}]}}, "input_order": {"required": ["string1", "string2", "delimiter"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], 
"output_name": ["STRING"], "name": "JoinStrings", "display_name": "Join Strings", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "JoinStringMulti": {"input": {"required": {"inputcount": ["INT", {"default": 2, "min": 2, "max": 1000, "step": 1}], "string_1": ["STRING", {"default": "", "forceInput": true}], "string_2": ["STRING", {"default": "", "forceInput": true}], "delimiter": ["STRING", {"default": " ", "multiline": false}], "return_list": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["inputcount", "string_1", "string_2", "delimiter", "return_list"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["string"], "name": "JoinStringMulti", "display_name": "Join String Multi", "description": "\nCreates single string, or a list of strings, from \nmultiple input strings. \nYou can set how many inputs the node has, \nwith the **inputcount** and clicking update.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SomethingToString": {"input": {"required": {"input": ["*"]}, "optional": {"prefix": ["STRING", {"default": ""}], "suffix": ["STRING", {"default": ""}]}}, "input_order": {"required": ["input"], "optional": ["prefix", "suffix"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SomethingToString", "display_name": "Something To String", "description": "\nConverts any type to a string.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Sleep": {"input": {"required": {"input": ["*"], "minutes": ["INT", {"default": 0, "min": 0, "max": 1439}], "seconds": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 
0.01}]}}, "input_order": {"required": ["input", "minutes", "seconds"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "Sleep", "display_name": "Sleep", "description": "\nDelays the execution for the input amount of time.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VRAM_Debug": {"input": {"required": {"empty_cache": ["BOOLEAN", {"default": true}], "gc_collect": ["BOOLEAN", {"default": true}], "unload_all_models": ["BOOLEAN", {"default": false}]}, "optional": {"any_input": ["*"], "image_pass": ["IMAGE"], "model_pass": ["MODEL"]}}, "input_order": {"required": ["empty_cache", "gc_collect", "unload_all_models"], "optional": ["any_input", "image_pass", "model_pass"]}, "is_input_list": false, "output": ["*", "IMAGE", "MODEL", "INT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["any_output", "image_pass", "model_pass", "freemem_before", "freemem_after"], "name": "VRAM_Debug", "display_name": "VRAM Debug", "description": "\nReturns the inputs unchanged, they are only used as triggers, \nand performs comfy model management functions and garbage collection, \nreports free VRAM before and after the operations.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EmptyLatentImagePresets": {"input": {"required": {"dimensions": [["512 x 512 (1:1)", "768 x 512 (1.5:1)", "960 x 512 (1.875:1)", "1024 x 512 (2:1)", "1024 x 576 (1.778:1)", "1536 x 640 (2.4:1)", "1344 x 768 (1.75:1)", "1216 x 832 (1.46:1)", "1152 x 896 (1.286:1)", "1024 x 1024 (1:1)"], {"default": "512 x 512 (1:1)"}], "invert": ["BOOLEAN", {"default": false}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["dimensions", "invert", "batch_size"]}, "is_input_list": false, 
"output": ["LATENT", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["Latent", "Width", "Height"], "name": "EmptyLatentImagePresets", "display_name": "Empty Latent Image Presets", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/latents", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "EmptyLatentImageCustomPresets": {"input": {"required": {"dimensions": [[]], "invert": ["BOOLEAN", {"default": false}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}]}}, "input_order": {"required": ["dimensions", "invert", "batch_size"]}, "is_input_list": false, "output": ["LATENT", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["Latent", "Width", "Height"], "name": "EmptyLatentImageCustomPresets", "display_name": "Empty Latent Image Custom Presets", "description": "\nGenerates an empty latent image with the specified dimensions. \nThe choices are loaded from 'custom_dimensions.json' in the nodes folder.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/latents", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelPassThrough": {"input": {"required": {}, "optional": {"model": ["MODEL"]}}, "input_order": {"required": [], "optional": ["model"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "ModelPassThrough", "display_name": "ModelPass", "description": "\n Simply passes through the model,\n workaround for Set node not allowing bypassed inputs.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ModelSaveKJ": {"input": {"required": {"model": ["MODEL"], "filename_prefix": ["STRING", {"default": "diffusion_models/ComfyUI"}], "model_key_prefix": ["STRING", {"default": "model.diffusion_model."}]}, "hidden": {"prompt": "PROMPT", 
"extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["model", "filename_prefix", "model_key_prefix"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "ModelSaveKJ", "display_name": "Model Save KJ", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "advanced/model_merging", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetShakkerLabsUnionControlNetType": {"input": {"required": {"control_net": ["CONTROL_NET"], "type": [["auto", "canny", "tile", "depth", "blur", "pose", "gray", "low quality"]]}}, "input_order": {"required": ["control_net", "type"]}, "is_input_list": false, "output": ["CONTROL_NET"], "output_is_list": [false], "output_name": ["CONTROL_NET"], "name": "SetShakkerLabsUnionControlNetType", "display_name": "Set Shakker Labs Union ControlNet Type", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "conditioning/controlnet", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StyleModelApplyAdvanced": {"input": {"required": {"conditioning": ["CONDITIONING"], "style_model": ["STYLE_MODEL"], "clip_vision_output": ["CLIP_VISION_OUTPUT"], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}]}}, "input_order": {"required": ["conditioning", "style_model", "clip_vision_output", "strength"]}, "is_input_list": false, "output": ["CONDITIONING"], "output_is_list": [false], "output_name": ["CONDITIONING"], "name": "StyleModelApplyAdvanced", "display_name": "Style Model Apply Advanced", "description": "StyleModelApply but with strength parameter", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "NormalizedAmplitudeToMask": {"input": {"required": {"normalized_amp": ["NORMALIZED_AMPLITUDE"], "width": ["INT", {"default": 512, 
"min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_offset": ["INT", {"default": 0, "min": -255, "max": 255, "step": 1}], "location_x": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "location_y": ["INT", {"default": 256, "min": 0, "max": 4096, "step": 1}], "size": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 1}], "shape": [["none", "circle", "square", "triangle"], {"default": "none"}], "color": [["white", "amplitude"], {"default": "amplitude"}]}}, "input_order": {"required": ["normalized_amp", "width", "height", "frame_offset", "location_x", "location_y", "size", "shape", "color"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "NormalizedAmplitudeToMask", "display_name": "NormalizedAmplitudeToMask", "description": "\nWorks as a bridge to the AudioScheduler -nodes: \nhttps://github.com/a1lazydog/ComfyUI-AudioScheduler \nCreates masks based on the normalized amplitude.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "NormalizedAmplitudeToFloatList": {"input": {"required": {"normalized_amp": ["NORMALIZED_AMPLITUDE"]}}, "input_order": {"required": ["normalized_amp"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "NormalizedAmplitudeToFloatList", "display_name": "NormalizedAmplitudeToFloatList", "description": "\nWorks as a bridge to the AudioScheduler -nodes: \nhttps://github.com/a1lazydog/ComfyUI-AudioScheduler \nCreates a list of floats from the normalized amplitude.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "OffsetMaskByNormalizedAmplitude": {"input": {"required": {"normalized_amp": ["NORMALIZED_AMPLITUDE"], "mask": ["MASK"], "x": 
["INT", {"default": 0, "min": -4096, "max": 16384, "step": 1, "display": "number"}], "y": ["INT", {"default": 0, "min": -4096, "max": 16384, "step": 1, "display": "number"}], "rotate": ["BOOLEAN", {"default": false}], "angle_multiplier": ["FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number"}]}}, "input_order": {"required": ["normalized_amp", "mask", "x", "y", "rotate", "angle_multiplier"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["mask"], "name": "OffsetMaskByNormalizedAmplitude", "display_name": "OffsetMaskByNormalizedAmplitude", "description": "\nWorks as a bridge to the AudioScheduler -nodes: \nhttps://github.com/a1lazydog/ComfyUI-AudioScheduler \nOffsets masks based on the normalized amplitude.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageTransformByNormalizedAmplitude": {"input": {"required": {"normalized_amp": ["NORMALIZED_AMPLITUDE"], "zoom_scale": ["FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number"}], "x_offset": ["INT", {"default": 0, "min": -16383, "max": 16384, "step": 1, "display": "number"}], "y_offset": ["INT", {"default": 0, "min": -16383, "max": 16384, "step": 1, "display": "number"}], "cumulative": ["BOOLEAN", {"default": false}], "image": ["IMAGE"]}}, "input_order": {"required": ["normalized_amp", "zoom_scale", "x_offset", "y_offset", "cumulative", "image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageTransformByNormalizedAmplitude", "display_name": "ImageTransformByNormalizedAmplitude", "description": "\nWorks as a bridge to the AudioScheduler -nodes: \nhttps://github.com/a1lazydog/ComfyUI-AudioScheduler \nTransforms image based on the normalized amplitude.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AudioConcatenate": {"input": {"required": {"audio1": ["AUDIO"], "audio2": ["AUDIO"], "direction": [["right", "left"], {"default": "right"}]}}, "input_order": {"required": ["audio1", "audio2", "direction"]}, "is_input_list": false, "output": ["AUDIO"], "output_is_list": [false], "output_name": ["AUDIO"], "name": "AudioConcatenate", "display_name": "AudioConcatenate", "description": "\nConcatenates the audio1 to audio2 in the specified direction.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SplineEditor": {"input": {"required": {"points_store": ["STRING", {"multiline": false}], "coordinates": ["STRING", {"multiline": false}], "mask_width": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "mask_height": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "points_to_sample": ["INT", {"default": 16, "min": 2, "max": 1000, "step": 1}], "sampling_method": [["path", "time", "controlpoints", "speed"], {"default": "time"}], "interpolation": [["cardinal", "monotone", "basis", "linear", "step-before", "step-after", "polar", "polar-reverse"], {"default": "cardinal"}], "tension": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "repeat_output": ["INT", {"default": 1, "min": 1, "max": 4096, "step": 1}], "float_output_type": [["list", "pandas series", "tensor"], {"default": "list"}]}, "optional": {"min_value": ["FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}], "max_value": ["FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}], "bg_image": ["IMAGE"]}}, "input_order": {"required": ["points_store", "coordinates", "mask_width", "mask_height", "points_to_sample", "sampling_method", "interpolation", "tension", "repeat_output", "float_output_type"], "optional": ["min_value", "max_value", "bg_image"]}, 
"is_input_list": false, "output": ["MASK", "STRING", "FLOAT", "INT", "STRING"], "output_is_list": [false, false, false, false, false], "output_name": ["mask", "coord_str", "float", "count", "normalized_str"], "name": "SplineEditor", "display_name": "Spline Editor", "description": "\n# WORK IN PROGRESS \nDo not count on this as part of your workflow yet, \nprobably contains lots of bugs and stability is not \nguaranteed!! \n \n## Graphical editor to create values for various \n## schedules and/or mask batches. \n\n**Shift + click** to add control point at end.\n**Ctrl + click** to add control point (subdivide) between two points. \n**Right click on a point** to delete it. \nNote that you can't delete from start/end. \n \nRight click on canvas for context menu: \nNEW!:\n- Add new spline\n - Creates a new spline on same canvas, currently these paths are only output \n as coordinates.\n- Add single point\n - Creates a single point that only returns its current position coords \n- Delete spline\n - Deletes the currently selected spline, you can select a spline by clicking on \n its path, or cycle through them with the 'Next spline' -option. \n\nThese are purely visual options, doesn't affect the output: \n - Toggle handles visibility\n - Display sample points: display the points to be returned. \n\n**points_to_sample** value sets the number of samples \nreturned from the **drawn spline itself**, this is independent from the \nactual control points, so the interpolation type matters. 
\nsampling_method: \n - time: samples along the time axis, used for schedules \n - path: samples along the path itself, useful for coordinates \n - controlpoints: samples only the control points themselves \n\noutput types:\n - mask batch \n example compatible nodes: anything that takes masks \n - list of floats\n example compatible nodes: IPAdapter weights \n - pandas series\n example compatible nodes: anything that takes Fizz' \n nodes Batch Value Schedule \n - torch tensor \n example compatible nodes: unknown\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/weights", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateShapeImageOnPath": {"input": {"required": {"shape": [["circle", "square", "triangle"], {"default": "circle"}], "coordinates": ["STRING", {"forceInput": true}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "shape_width": ["INT", {"default": 128, "min": 2, "max": 4096, "step": 1}], "shape_height": ["INT", {"default": 128, "min": 2, "max": 4096, "step": 1}], "shape_color": ["STRING", {"default": "white"}], "bg_color": ["STRING", {"default": "black"}], "blur_radius": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}], "intensity": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}], "trailing": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}], "border_width": ["INT", {"default": 0, "min": 0, "max": 100, "step": 1}], "border_color": ["STRING", {"default": "black"}]}}, "input_order": {"required": ["shape", "coordinates", "frame_width", "frame_height", "shape_width", "shape_height", "shape_color", "bg_color", "blur_radius", "intensity"], "optional": ["size_multiplier", "trailing", "border_width", "border_color"]}, "is_input_list": false, "output": ["IMAGE", 
"MASK"], "output_is_list": [false, false], "output_name": ["image", "mask"], "name": "CreateShapeImageOnPath", "display_name": "Create Shape Image On Path", "description": "\nCreates an image or batch of images with the specified shape. \nLocations are center locations. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateShapeMaskOnPath": {"input": {"required": {"shape": [["circle", "square", "triangle"], {"default": "circle"}], "coordinates": ["STRING", {"forceInput": true}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "shape_width": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 1}], "shape_height": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 1}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}]}}, "input_order": {"required": ["shape", "coordinates", "frame_width", "frame_height", "shape_width", "shape_height"], "optional": ["size_multiplier"]}, "is_input_list": false, "output": ["MASK", "MASK"], "output_is_list": [false, false], "output_name": ["mask", "mask_inverted"], "name": "CreateShapeMaskOnPath", "display_name": "Create Shape Mask On Path", "description": "\nCreates a mask or batch of masks with the specified shape. \nLocations are center locations. 
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "deprecated": true, "search_aliases": []}, "CreateTextOnPath": {"input": {"required": {"coordinates": ["STRING", {"forceInput": true}], "text": ["STRING", {"default": "text", "multiline": true}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "font": [["FreeMono.ttf", "FreeMonoBoldOblique.otf", "TTNorms-Black.otf"]], "font_size": ["INT", {"default": 42}], "alignment": [["left", "center", "right"], {"default": "center"}], "text_color": ["STRING", {"default": "white"}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}]}}, "input_order": {"required": ["coordinates", "text", "frame_width", "frame_height", "font", "font_size", "alignment", "text_color"], "optional": ["size_multiplier"]}, "is_input_list": false, "output": ["IMAGE", "MASK", "MASK"], "output_is_list": [false, false, false], "output_name": ["image", "mask", "mask_inverted"], "name": "CreateTextOnPath", "display_name": "Create Text On Path", "description": "\nCreates a mask or batch of masks with the specified text. \nLocations are center locations. 
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CreateGradientFromCoords": {"input": {"required": {"coordinates": ["STRING", {"forceInput": true}], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "start_color": ["STRING", {"default": "white"}], "end_color": ["STRING", {"default": "black"}], "multiplier": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}]}}, "input_order": {"required": ["coordinates", "frame_width", "frame_height", "start_color", "end_color", "multiplier"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "CreateGradientFromCoords", "display_name": "Create Gradient From Coords", "description": "\nCreates a gradient image from coordinates. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CutAndDragOnPath": {"input": {"required": {"image": ["IMAGE"], "coordinates": ["STRING", {"forceInput": true}], "mask": ["MASK"], "frame_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "frame_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "inpaint": ["BOOLEAN", {"default": true}]}, "optional": {"bg_image": ["IMAGE"]}}, "input_order": {"required": ["image", "coordinates", "mask", "frame_width", "frame_height", "inpaint"], "optional": ["bg_image"]}, "is_input_list": false, "output": ["IMAGE", "MASK"], "output_is_list": [false, false], "output_name": ["image", "mask"], "name": "CutAndDragOnPath", "display_name": "Cut And Drag On Path", "description": "\nCuts the masked area from the image, and drags it along the path. 
If inpaint is enabled, and no bg_image is provided, the cut area is filled using cv2 TELEA algorithm.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GradientToFloat": {"input": {"required": {"image": ["IMAGE"], "steps": ["INT", {"default": 10, "min": 2, "max": 10000, "step": 1}]}}, "input_order": {"required": ["image", "steps"]}, "is_input_list": false, "output": ["FLOAT", "FLOAT"], "output_is_list": [false, false], "output_name": ["float_x", "float_y"], "name": "GradientToFloat", "display_name": "Gradient To Float", "description": "\nCalculates list of floats from image. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "WeightScheduleExtend": {"input": {"required": {"input_values_1": ["FLOAT", {"default": 0.0, "forceInput": true}], "input_values_2": ["FLOAT", {"default": 0.0, "forceInput": true}], "output_type": [["match_input", "list", "pandas series", "tensor"], {"default": "match_input"}]}}, "input_order": {"required": ["input_values_1", "input_values_2", "output_type"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "WeightScheduleExtend", "display_name": "Weight Schedule Extend", "description": "\nExtends, and converts if needed, different value lists/series \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/weights", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaskOrImageToWeight": {"input": {"required": {"output_type": [["list", "pandas series", "tensor", "string"], {"default": "list"}]}, "optional": {"images": ["IMAGE"], "masks": ["MASK"]}}, "input_order": {"required": ["output_type"], "optional": ["images", "masks"]}, "is_input_list": false, "output": ["FLOAT", "STRING"], "output_is_list": [false, false], 
"output_name": ["FLOAT", "STRING"], "name": "MaskOrImageToWeight", "display_name": "Mask Or Image To Weight", "description": "\nGets the mean values from mask or image batch \nand returns that as the selected output type. \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/weights", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "WeightScheduleConvert": {"input": {"required": {"input_values": ["FLOAT", {"default": 0.0, "forceInput": true}], "output_type": [["match_input", "list", "pandas series", "tensor"], {"default": "list"}], "invert": ["BOOLEAN", {"default": false}], "repeat": ["INT", {"default": 1, "min": 1, "max": 255, "step": 1}]}, "optional": {"remap_to_frames": ["INT", {"default": 0}], "interpolation_curve": ["FLOAT", {"forceInput": true}], "remap_values": ["BOOLEAN", {"default": false}], "remap_min": ["FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}], "remap_max": ["FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}]}}, "input_order": {"required": ["input_values", "output_type", "invert", "repeat"], "optional": ["remap_to_frames", "interpolation_curve", "remap_values", "remap_min", "remap_max"]}, "is_input_list": false, "output": ["FLOAT", "STRING", "INT"], "output_is_list": [false, false, false], "output_name": ["FLOAT", "STRING", "INT"], "name": "WeightScheduleConvert", "display_name": "Weight Schedule Convert", "description": "\nConverts different value lists/series to another type. 
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/weights", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FloatToMask": {"input": {"required": {"input_values": ["FLOAT", {"forceInput": true, "default": 0}], "width": ["INT", {"default": 100, "min": 1}], "height": ["INT", {"default": 100, "min": 1}]}}, "input_order": {"required": ["input_values", "width", "height"]}, "is_input_list": false, "output": ["MASK"], "output_is_list": [false], "output_name": ["MASK"], "name": "FloatToMask", "display_name": "Float To Mask", "description": "\nGenerates a batch of masks based on the input float values.\nThe batch size is determined by the length of the input float values.\nEach mask is generated with the specified width and height.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/masking/generate", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FloatToSigmas": {"input": {"required": {"float_list": ["FLOAT", {"default": 0.0, "forceInput": true}]}}, "input_order": {"required": ["float_list"]}, "is_input_list": false, "output": ["SIGMAS"], "output_is_list": [false], "output_name": ["SIGMAS"], "name": "FloatToSigmas", "display_name": "Float To Sigmas", "description": "\nCreates a sigmas tensor from list of float values. \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SigmasToFloat": {"input": {"required": {"sigmas": ["SIGMAS"]}}, "input_order": {"required": ["sigmas"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["float"], "name": "SigmasToFloat", "display_name": "Sigmas To Float", "description": "\nCreates a float list from sigmas tensors. 
\n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/noise", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PlotCoordinates": {"input": {"required": {"coordinates": ["STRING", {"forceInput": true}], "text": ["STRING", {"default": "title", "multiline": false}], "width": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "height": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "bbox_width": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 8}], "bbox_height": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 8}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}]}}, "input_order": {"required": ["coordinates", "text", "width", "height", "bbox_width", "bbox_height"], "optional": ["size_multiplier"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false], "output_name": ["images", "width", "height", "bbox_width", "bbox_height"], "name": "PlotCoordinates", "display_name": "Plot Coordinates", "description": "\nPlots coordinates to sequence of images using Matplotlib. \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InterpolateCoords": {"input": {"required": {"coordinates": ["STRING", {"forceInput": true}], "interpolation_curve": ["FLOAT", {"forceInput": true}]}}, "input_order": {"required": ["coordinates", "interpolation_curve"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["coordinates"], "name": "InterpolateCoords", "display_name": "Interpolate Coords", "description": "\nInterpolates coordinates based on a curve. 
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PointsEditor": {"input": {"required": {"points_store": ["STRING", {"multiline": false}], "coordinates": ["STRING", {"multiline": false}], "neg_coordinates": ["STRING", {"multiline": false}], "bbox_store": ["STRING", {"multiline": false}], "bboxes": ["STRING", {"multiline": false}], "bbox_format": [["xyxy", "xywh"]], "width": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "height": ["INT", {"default": 512, "min": 8, "max": 4096, "step": 8}], "normalize": ["BOOLEAN", {"default": false}]}, "optional": {"bg_image": ["IMAGE"]}}, "input_order": {"required": ["points_store", "coordinates", "neg_coordinates", "bbox_store", "bboxes", "bbox_format", "width", "height", "normalize"], "optional": ["bg_image"]}, "is_input_list": false, "output": ["STRING", "STRING", "BBOX", "MASK", "IMAGE"], "output_is_list": [false, false, false, false, false], "output_name": ["positive_coords", "negative_coords", "bbox", "bbox_mask", "cropped_image"], "name": "PointsEditor", "display_name": "Points Editor", "description": "\n# WORK IN PROGRESS \nDo not count on this as part of your workflow yet, \nprobably contains lots of bugs and stability is not \nguaranteed!! \n \n## Graphical editor to create coordinates\n\n**Shift + click** to add a positive (green) point.\n**Shift + right click** to add a negative (red) point.\n**Ctrl + click** to draw a box. \n**Right click on a point** to delete it. \nNote that you can't delete from start/end of the points array. \n \nTo add an image select the node and copy/paste or drag in the image. \nOr from the bg_image input on queue (first frame of the batch). 
\n\n**THE IMAGE IS SAVED TO THE NODE AND WORKFLOW METADATA** \nyou can clear the image from the context menu by right clicking on the canvas \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SoundReactive": {"input": {"required": {"sound_level": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}], "start_range_hz": ["INT", {"default": 150, "min": 0, "max": 9999, "step": 1}], "end_range_hz": ["INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}], "multiplier": ["FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}], "smoothing_factor": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "normalize": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["sound_level", "start_range_hz", "end_range_hz", "multiplier", "smoothing_factor", "normalize"]}, "is_input_list": false, "output": ["FLOAT", "INT"], "output_is_list": [false, false], "output_name": ["sound_level", "sound_level_int"], "name": "SoundReactive", "display_name": "Sound Reactive", "description": "\nReacts to the sound level of the input. \nUses your browsers sound input options and requires. 
\nMeant to be used with realtime diffusion with autoqueue.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/audio", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StableZero123_BatchSchedule": {"input": {"required": {"clip_vision": ["CLIP_VISION"], "init_image": ["IMAGE"], "vae": ["VAE"], "width": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 256, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 4096}], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out"]], "azimuth_points_string": ["STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": true}], "elevation_points_string": ["STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": true}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", "width", "height", "batch_size", "interpolation", "azimuth_points_string", "elevation_points_string"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "name": "StableZero123_BatchSchedule", "display_name": "Stable Zero123 Batch Schedule", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SV3D_BatchSchedule": {"input": {"required": {"clip_vision": ["CLIP_VISION"], "init_image": ["IMAGE"], "vae": ["VAE"], "width": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "height": ["INT", {"default": 576, "min": 16, "max": 16384, "step": 8}], "batch_size": ["INT", {"default": 21, "min": 1, "max": 4096}], "interpolation": [["linear", "ease_in", "ease_out", "ease_in_out"]], "azimuth_points_string": ["STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": true}], "elevation_points_string": ["STRING", {"default": 
"0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": true}]}}, "input_order": {"required": ["clip_vision", "init_image", "vae", "width", "height", "batch_size", "interpolation", "azimuth_points_string", "elevation_points_string"]}, "is_input_list": false, "output": ["CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false], "output_name": ["positive", "negative", "latent"], "name": "SV3D_BatchSchedule", "display_name": "SV3D Batch Schedule", "description": "\nAllow scheduling of the azimuth and elevation conditions for SV3D. \nNote that SV3D is still a video model and the schedule needs to always go forward \nhttps://huggingface.co/stabilityai/sv3d\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoadResAdapterNormalization": {"input": {"required": {"model": ["MODEL"], "resadapter_path": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", 
"flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", 
"sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", 
"sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"]]}}, "input_order": {"required": ["model", "resadapter_path"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "LoadResAdapterNormalization", "display_name": "LoadResAdapterNormalization", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, 
"has_intermediate_output": false, "search_aliases": []}, "Superprompt": {"input": {"required": {"instruction_prompt": ["STRING", {"default": "Expand the following prompt to add more detail", "multiline": true}], "prompt": ["STRING", {"default": "", "multiline": true, "forceInput": true}], "max_new_tokens": ["INT", {"default": 128, "min": 1, "max": 4096, "step": 1}]}}, "input_order": {"required": ["instruction_prompt", "prompt", "max_new_tokens"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "Superprompt", "display_name": "Superprompt", "description": "\n# SuperPrompt\nA T5 model fine-tuned on the SuperPrompt dataset for \nupsampling text prompts to more detailed descriptions. \nMeant to be used as a pre-generation step for text-to-image \nmodels that benefit from more detailed prompts. \nhttps://huggingface.co/roborovski/superprompt-v1\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/text", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GLIGENTextBoxApplyBatchCoords": {"input": {"required": {"conditioning_to": ["CONDITIONING"], "latents": ["LATENT"], "clip": ["CLIP"], "gligen_textbox_model": ["GLIGEN"], "coordinates": ["STRING", {"forceInput": true}], "text": ["STRING", {"multiline": true}], "width": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 8}], "height": ["INT", {"default": 128, "min": 8, "max": 4096, "step": 8}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}]}}, "input_order": {"required": ["conditioning_to", "latents", "clip", "gligen_textbox_model", "coordinates", "text", "width", "height"], "optional": ["size_multiplier"]}, "is_input_list": false, "output": ["CONDITIONING", "IMAGE"], "output_is_list": [false, false], "output_name": ["conditioning", "coord_preview"], "name": "GLIGENTextBoxApplyBatchCoords", "display_name": "GLIGENTextBoxApplyBatchCoords", "description": "\nThis node allows 
scheduling GLIGEN text box positions in a batch, \nto be used with AnimateDiff-Evolved. Intended to pair with the \nSpline Editor -node. \n\nGLIGEN model can be downloaded through the Manage's \"Install Models\" menu. \nOr directly from here: \nhttps://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main \n \nInputs: \n- **latents** input is used to calculate batch size \n- **clip** is your standard text encoder, use same as for the main prompt \n- **gligen_textbox_model** connects to GLIGEN Loader \n- **coordinates** takes a json string of points, directly compatible \nwith the spline editor node.\n- **text** is the part of the prompt to set position for \n- **width** and **height** are the size of the GLIGEN bounding box \n \nOutputs:\n- **conditioning** goes between to clip text encode and the sampler \n- **coord_preview** is an optional preview of the coordinates and \nbounding boxes.\n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Intrinsic_lora_sampling": {"input": {"required": {"model": ["MODEL"], "lora_name": [["intrinsic_lora_sd15_albedo.safetensors", "intrinsic_lora_sd15_depth.safetensors", "intrinsic_lora_sd15_normal.safetensors", "intrinsic_lora_sd15_shading.safetensors", "intrinsic_loras.txt"]], "task": [["depth map", "surface normals", "albedo", "shading"], {"default": "depth map"}], "text": ["STRING", {"multiline": true, "default": ""}], "clip": ["CLIP"], "vae": ["VAE"], "per_batch": ["INT", {"default": 16, "min": 1, "max": 4096, "step": 1}]}, "optional": {"image": ["IMAGE"], "optional_latent": ["LATENT"]}}, "input_order": {"required": ["model", "lora_name", "task", "text", "clip", "vae", "per_batch"], "optional": ["image", "optional_latent"]}, "is_input_list": false, "output": ["IMAGE", "LATENT"], "output_is_list": [false, false], "output_name": ["IMAGE", "LATENT"], "name": "Intrinsic_lora_sampling", "display_name": 
"Intrinsic Lora Sampling", "description": "\nSampler to use the intrinsic loras: \nhttps://github.com/duxiaodan/intrinsic-lora \nThese LoRAs are tiny and thus included \nwith this node pack.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CheckpointPerturbWeights": {"input": {"required": {"model": ["MODEL"], "joint_blocks": ["FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}], "final_layer": ["FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}], "rest_of_the_blocks": ["FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}], "seed": ["INT", {"default": 123, "min": 0, "max": 18446744073709551615, "step": 1}]}}, "input_order": {"required": ["model", "joint_blocks", "final_layer", "rest_of_the_blocks", "seed"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "CheckpointPerturbWeights", "display_name": "CheckpointPerturbWeights", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Screencap_mss": {"input": {"required": {"x": ["INT", {"default": 0, "min": 0, "max": 10000, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 10000, "step": 1}], "width": ["INT", {"default": 512, "min": 0, "max": 10000, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 10000, "step": 1}], "num_frames": ["INT", {"default": 1, "min": 1, "max": 255, "step": 1}], "delay": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["x", "y", "width", "height", "num_frames", "delay"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "Screencap_mss", "display_name": "Screencap mss", "description": "\nCaptures an area specified by screen 
coordinates. \nCan be used for realtime diffusion with autoqueue.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "WebcamCaptureCV2": {"input": {"required": {"x": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "y": ["INT", {"default": 0, "min": 0, "max": 4096, "step": 1}], "width": ["INT", {"default": 512, "min": 0, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 0, "max": 4096, "step": 1}], "cam_index": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "release": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["x", "y", "width", "height", "cam_index", "release"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "WebcamCaptureCV2", "display_name": "Webcam Capture CV2", "description": "\nCaptures a frame from a webcam using CV2. \nCan be used for realtime diffusion with autoqueue.\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DifferentialDiffusionAdvanced": {"input": {"required": {"model": ["MODEL"], "samples": ["LATENT"], "mask": ["MASK"], "multiplier": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}]}}, "input_order": {"required": ["model", "samples", "mask", "multiplier"]}, "is_input_list": false, "output": ["MODEL", "LATENT"], "output_is_list": [false, false], "output_name": ["MODEL", "LATENT"], "name": "DifferentialDiffusionAdvanced", "display_name": "Differential Diffusion Advanced", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "_for_testing", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DiTBlockLoraLoader": {"input": {"required": {"model": ["MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}], 
"strength_model": ["FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}]}, "optional": {"lora_name": [["AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", 
"flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", 
"ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", 
"sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"], {"tooltip": "The name of the LoRA."}], "opt_lora_path": ["STRING", {"forceInput": true, "tooltip": "Absolute path of the LoRA."}], "blocks": ["SELECTEDDITBLOCKS"]}}, "input_order": {"required": ["model", "strength_model"], "optional": ["lora_name", "opt_lora_path", "blocks"]}, "is_input_list": false, "output": ["MODEL", "STRING"], "output_is_list": [false, false], "output_name": ["model", "rank"], "name": "DiTBlockLoraLoader", "display_name": "DiT Block Lora Loader", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model.", "possible rank of the LoRA."], "search_aliases": []}, "FluxBlockLoraSelect": {"input": {"required": {"double_blocks.0.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.1.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.2.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.3.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.4.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.5.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.6.": 
["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.7.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.8.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.9.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.10.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.11.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.12.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.13.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.14.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.15.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.16.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.17.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.18.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.0.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.1.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.2.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.3.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.4.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.5.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.6.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.7.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.8.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.9.": ["FLOAT", 
{"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.10.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.11.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.12.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.13.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.14.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.15.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.16.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.17.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.18.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.19.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.20.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.21.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.22.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.23.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.24.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.25.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.26.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.27.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.28.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.29.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.30.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.31.": 
["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.32.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.33.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.34.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.35.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.36.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.37.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}]}}, "input_order": {"required": ["double_blocks.0.", "double_blocks.1.", "double_blocks.2.", "double_blocks.3.", "double_blocks.4.", "double_blocks.5.", "double_blocks.6.", "double_blocks.7.", "double_blocks.8.", "double_blocks.9.", "double_blocks.10.", "double_blocks.11.", "double_blocks.12.", "double_blocks.13.", "double_blocks.14.", "double_blocks.15.", "double_blocks.16.", "double_blocks.17.", "double_blocks.18.", "single_blocks.0.", "single_blocks.1.", "single_blocks.2.", "single_blocks.3.", "single_blocks.4.", "single_blocks.5.", "single_blocks.6.", "single_blocks.7.", "single_blocks.8.", "single_blocks.9.", "single_blocks.10.", "single_blocks.11.", "single_blocks.12.", "single_blocks.13.", "single_blocks.14.", "single_blocks.15.", "single_blocks.16.", "single_blocks.17.", "single_blocks.18.", "single_blocks.19.", "single_blocks.20.", "single_blocks.21.", "single_blocks.22.", "single_blocks.23.", "single_blocks.24.", "single_blocks.25.", "single_blocks.26.", "single_blocks.27.", "single_blocks.28.", "single_blocks.29.", "single_blocks.30.", "single_blocks.31.", "single_blocks.32.", "single_blocks.33.", "single_blocks.34.", "single_blocks.35.", "single_blocks.36.", "single_blocks.37."]}, "is_input_list": false, "output": ["SELECTEDDITBLOCKS"], "output_is_list": [false], "output_name": ["blocks"], "name": "FluxBlockLoraSelect", "display_name": "Flux Block 
Lora Select", "description": "Select individual block alpha values, value of 0 removes the block altogether", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model."], "search_aliases": []}, "HunyuanVideoBlockLoraSelect": {"input": {"required": {"double_blocks.0.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.1.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.2.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.3.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.4.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.5.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.6.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.7.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.8.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.9.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.10.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.11.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.12.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.13.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.14.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.15.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.16.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.17.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, 
"step": 0.01}], "double_blocks.18.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "double_blocks.19.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.0.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.1.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.2.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.3.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.4.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.5.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.6.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.7.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.8.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.9.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.10.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.11.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.12.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.13.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.14.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.15.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.16.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.17.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.18.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.19.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 
0.01}], "single_blocks.20.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.21.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.22.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.23.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.24.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.25.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.26.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.27.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.28.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.29.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.30.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.31.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.32.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.33.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.34.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.35.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.36.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.37.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.38.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "single_blocks.39.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}]}}, "input_order": {"required": ["double_blocks.0.", "double_blocks.1.", "double_blocks.2.", "double_blocks.3.", "double_blocks.4.", "double_blocks.5.", "double_blocks.6.", 
"double_blocks.7.", "double_blocks.8.", "double_blocks.9.", "double_blocks.10.", "double_blocks.11.", "double_blocks.12.", "double_blocks.13.", "double_blocks.14.", "double_blocks.15.", "double_blocks.16.", "double_blocks.17.", "double_blocks.18.", "double_blocks.19.", "single_blocks.0.", "single_blocks.1.", "single_blocks.2.", "single_blocks.3.", "single_blocks.4.", "single_blocks.5.", "single_blocks.6.", "single_blocks.7.", "single_blocks.8.", "single_blocks.9.", "single_blocks.10.", "single_blocks.11.", "single_blocks.12.", "single_blocks.13.", "single_blocks.14.", "single_blocks.15.", "single_blocks.16.", "single_blocks.17.", "single_blocks.18.", "single_blocks.19.", "single_blocks.20.", "single_blocks.21.", "single_blocks.22.", "single_blocks.23.", "single_blocks.24.", "single_blocks.25.", "single_blocks.26.", "single_blocks.27.", "single_blocks.28.", "single_blocks.29.", "single_blocks.30.", "single_blocks.31.", "single_blocks.32.", "single_blocks.33.", "single_blocks.34.", "single_blocks.35.", "single_blocks.36.", "single_blocks.37.", "single_blocks.38.", "single_blocks.39."]}, "is_input_list": false, "output": ["SELECTEDDITBLOCKS"], "output_is_list": [false], "output_name": ["blocks"], "name": "HunyuanVideoBlockLoraSelect", "display_name": "Hunyuan Video Block Lora Select", "description": "Select individual block alpha values, value of 0 removes the block altogether", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "output_tooltips": ["The modified diffusion model."], "search_aliases": []}, "Wan21BlockLoraSelect": {"input": {"required": {"blocks.0.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.1.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.2.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.3.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], 
"blocks.4.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.5.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.6.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.7.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.8.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.9.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.10.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.11.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.12.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.13.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.14.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.15.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.16.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.17.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.18.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.19.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.20.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.21.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.22.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.23.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.24.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.25.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.26.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.27.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, 
"step": 0.01}], "blocks.28.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.29.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.30.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.31.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.32.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.33.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.34.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.35.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.36.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.37.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.38.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}], "blocks.39.": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}]}}, "input_order": {"required": ["blocks.0.", "blocks.1.", "blocks.2.", "blocks.3.", "blocks.4.", "blocks.5.", "blocks.6.", "blocks.7.", "blocks.8.", "blocks.9.", "blocks.10.", "blocks.11.", "blocks.12.", "blocks.13.", "blocks.14.", "blocks.15.", "blocks.16.", "blocks.17.", "blocks.18.", "blocks.19.", "blocks.20.", "blocks.21.", "blocks.22.", "blocks.23.", "blocks.24.", "blocks.25.", "blocks.26.", "blocks.27.", "blocks.28.", "blocks.29.", "blocks.30.", "blocks.31.", "blocks.32.", "blocks.33.", "blocks.34.", "blocks.35.", "blocks.36.", "blocks.37.", "blocks.38.", "blocks.39."]}, "is_input_list": false, "output": ["SELECTEDDITBLOCKS"], "output_is_list": [false], "output_name": ["blocks"], "name": "Wan21BlockLoraSelect", "display_name": "Wan21 Block Lora Select", "description": "Select individual block alpha values, value of 0 removes the block altogether", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, 
"has_intermediate_output": false, "output_tooltips": ["The modified diffusion model."], "search_aliases": []}, "CustomControlNetWeightsFluxFromList": {"input": {"required": {"list_of_floats": ["FLOAT", {"forceInput": true}]}, "optional": {"uncond_multiplier": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}], "cn_extras": ["CN_WEIGHTS_EXTRAS"], "autosize": ["ACNAUTOSIZE", {"padding": 0}]}}, "input_order": {"required": ["list_of_floats"], "optional": ["uncond_multiplier", "cn_extras", "autosize"]}, "is_input_list": false, "output": ["CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME"], "output_is_list": [false, false], "output_name": ["CN_WEIGHTS", "TK_SHORTCUT"], "name": "CustomControlNetWeightsFluxFromList", "display_name": "Custom ControlNet Weights Flux From List", "description": "Creates controlnet weights from a list of floats for Advanced-ControlNet", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/controlnet", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CheckpointLoaderKJ": {"input": {"required": {"ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", 
"TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", 
"flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", 
"sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", 
"sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], {"tooltip": "The name of the checkpoint (model) to load."}], "weight_dtype": [["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"]], "compute_dtype": [["default", "fp16", "bf16", 
"fp32"], {"default": "default", "tooltip": "The compute dtype to use for the model."}], "patch_cublaslinear": ["BOOLEAN", {"default": false, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}], "sage_attention": [["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda", "sageattn_qk_int8_pv_fp8_cuda++"], {"default": false, "tooltip": "Patch comfy attention to use sageattn."}], "enable_fp16_accumulation": ["BOOLEAN", {"default": false, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}]}}, "input_order": {"required": ["ckpt_name", "weight_dtype", "compute_dtype", "patch_cublaslinear", "sage_attention", "enable_fp16_accumulation"]}, "is_input_list": false, "output": ["MODEL", "CLIP", "VAE"], "output_is_list": [false, false, false], "output_name": ["MODEL", "CLIP", "VAE"], "name": "CheckpointLoaderKJ", "display_name": "CheckpointLoaderKJ", "description": "Experimental node for patching torch.nn.Linear with CublasLinear.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "DiffusionModelLoaderKJ": {"input": {"required": {"model_name": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", 
"FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", 
"cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-dev.sft", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux1-schnell.sft", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", 
"hunyuan3d-dit-v2-1/model.fp16.ckpt", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", 
"qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", 
"wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", 
"wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"], {"tooltip": "The name of the checkpoint (model) to load."}], "weight_dtype": [["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"]], "compute_dtype": [["default", "fp16", "bf16", "fp32"], {"default": "default", "tooltip": "The compute dtype to use for the model."}], "patch_cublaslinear": ["BOOLEAN", {"default": false, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}], "sage_attention": [["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda", "sageattn_qk_int8_pv_fp8_cuda++"], {"default": false, "tooltip": "Patch comfy attention to use sageattn."}], "enable_fp16_accumulation": ["BOOLEAN", {"default": false, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}]}}, "input_order": {"required": ["model_name", "weight_dtype", "compute_dtype", "patch_cublaslinear", "sage_attention", "enable_fp16_accumulation"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "DiffusionModelLoaderKJ", "display_name": "Diffusion Model Loader KJ", "description": "Node for patching torch.nn.Linear with CublasLinear.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, 
"search_aliases": []}, "TorchCompileModelFluxAdvanced": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "double_blocks": ["STRING", {"default": "0-18", "multiline": true}], "single_blocks": ["STRING", {"default": "0-37", "multiline": true}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}]}, "optional": {"dynamo_cache_size_limit": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "double_blocks", "single_blocks", "dynamic"], "optional": ["dynamo_cache_size_limit"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileModelFluxAdvanced", "display_name": "TorchCompileModelFluxAdvanced", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "deprecated": true, "experimental": true, "search_aliases": []}, "TorchCompileModelFluxAdvancedV2": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "double_blocks": ["BOOLEAN", {"default": true, "tooltip": "Compile double blocks"}], "single_blocks": ["BOOLEAN", {"default": true, "tooltip": "Compile single blocks"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}]}, "optional": {"dynamo_cache_size_limit": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}]}}, 
"input_order": {"required": ["model", "backend", "fullgraph", "mode", "double_blocks", "single_blocks", "dynamic"], "optional": ["dynamo_cache_size_limit"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileModelFluxAdvancedV2", "display_name": "TorchCompileModelFluxAdvancedV2", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileModelHyVideo": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"], {"default": "inductor"}], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}], "dynamo_cache_size_limit": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}], "compile_single_blocks": ["BOOLEAN", {"default": true, "tooltip": "Compile single blocks"}], "compile_double_blocks": ["BOOLEAN", {"default": true, "tooltip": "Compile double blocks"}], "compile_txt_in": ["BOOLEAN", {"default": false, "tooltip": "Compile txt_in layers"}], "compile_vector_in": ["BOOLEAN", {"default": false, "tooltip": "Compile vector_in layers"}], "compile_final_layer": ["BOOLEAN", {"default": false, "tooltip": "Compile final layer"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "dynamic", "dynamo_cache_size_limit", "compile_single_blocks", "compile_double_blocks", "compile_txt_in", "compile_vector_in", "compile_final_layer"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileModelHyVideo", "display_name": "TorchCompileModelHyVideo", "description": "", "python_module": 
"custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileVAE": {"input": {"required": {"vae": ["VAE"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "compile_encoder": ["BOOLEAN", {"default": true, "tooltip": "Compile encoder"}], "compile_decoder": ["BOOLEAN", {"default": true, "tooltip": "Compile decoder"}]}}, "input_order": {"required": ["vae", "backend", "fullgraph", "mode", "compile_encoder", "compile_decoder"]}, "is_input_list": false, "output": ["VAE"], "output_is_list": [false], "output_name": ["VAE"], "name": "TorchCompileVAE", "display_name": "TorchCompileVAE", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileControlNet": {"input": {"required": {"controlnet": ["CONTROL_NET"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}]}}, "input_order": {"required": ["controlnet", "backend", "fullgraph", "mode"]}, "is_input_list": false, "output": ["CONTROL_NET"], "output_is_list": [false], "output_name": ["CONTROL_NET"], "name": "TorchCompileControlNet", "display_name": "TorchCompileControlNet", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "PatchModelPatcherOrder": {"input": {"required": {"model": ["MODEL"], "patch_order": [["object_patch_first", 
"weight_patch_first"], {"default": "weight_patch_first", "tooltip": "Patch the comfy patch_model function to load weight patches (LoRAs) before compiling the model"}], "full_load": [["enabled", "disabled", "auto"], {"default": "auto", "tooltip": "Disabling may help with memory issues when loading large models, when changing this you should probably force model reload to avoid issues!"}]}}, "input_order": {"required": ["model", "patch_order", "full_load"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "PatchModelPatcherOrder", "display_name": "Patch Model Patcher Order", "description": "Patch the comfy patch_model function patching order, useful for torch.compile (used as object_patch) as it should come last if you want to use LoRAs with compile", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileLTXModel": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "dynamic"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileLTXModel", "display_name": "TorchCompileLTXModel", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileCosmosModel": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"]], "fullgraph": ["BOOLEAN", {"default": 
false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}], "dynamo_cache_size_limit": ["INT", {"default": 64, "tooltip": "Set the dynamo cache size limit"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "dynamic", "dynamo_cache_size_limit"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileCosmosModel", "display_name": "TorchCompileCosmosModel", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TorchCompileModelWanVideo": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"], {"default": "inductor"}], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}], "dynamo_cache_size_limit": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}], "compile_transformer_blocks_only": ["BOOLEAN", {"default": false, "tooltip": "Compile only transformer blocks"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "dynamic", "dynamo_cache_size_limit", "compile_transformer_blocks_only"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileModelWanVideo", "display_name": "TorchCompileModelWanVideo", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "deprecated": true, 
"experimental": true, "search_aliases": []}, "TorchCompileModelWanVideoV2": {"input": {"required": {"model": ["MODEL"], "backend": [["inductor", "cudagraphs"], {"default": "inductor"}], "fullgraph": ["BOOLEAN", {"default": false, "tooltip": "Enable full graph mode"}], "mode": [["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}], "dynamic": ["BOOLEAN", {"default": false, "tooltip": "Enable dynamic mode"}], "compile_transformer_blocks_only": ["BOOLEAN", {"default": true, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}], "dynamo_cache_size_limit": ["INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}]}}, "input_order": {"required": ["model", "backend", "fullgraph", "mode", "dynamic", "compile_transformer_blocks_only", "dynamo_cache_size_limit"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "TorchCompileModelWanVideoV2", "display_name": "TorchCompileModelWanVideoV2", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/torchcompile", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "PathchSageAttentionKJ": {"input": {"required": {"model": ["MODEL"], "sage_attention": [["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda", "sageattn_qk_int8_pv_fp8_cuda++"], {"default": false, "tooltip": "Global patch comfy attention to use sageattn, once patched to revert back to normal you would need to run this node again with disabled option."}]}}, "input_order": {"required": ["model", "sage_attention"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "PathchSageAttentionKJ", "display_name": "Patch Sage Attention KJ", "description": "Experimental node for patching attention 
mode. This doesn't use the model patching system and thus can't be disabled without running the node again with 'disabled' option.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "LeapfusionHunyuanI2VPatcher": {"input": {"required": {"model": ["MODEL"], "latent": ["LATENT"], "index": ["INT", {"default": 0, "min": -1, "max": 1000, "step": 1, "tooltip": "The index of the latent to be replaced. 0 for first frame and -1 for last"}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of steps to apply"}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of steps to apply"}], "strength": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}]}}, "input_order": {"required": ["model", "latent", "index", "start_percent", "end_percent", "strength"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "LeapfusionHunyuanI2VPatcher", "display_name": "Leapfusion Hunyuan I2V Patcher", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VAELoaderKJ": {"input": {"required": {"vae_name": [["FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", 
"flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", "wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors", "taesd", "taesdxl", "taesd3", "taef1"]], "device": [["main_device", "cpu"]], "weight_dtype": [["bf16", "fp16", "fp32"]]}}, "input_order": {"required": ["vae_name", "device", "weight_dtype"]}, "is_input_list": false, "output": ["VAE"], "output_is_list": [false], "output_name": ["VAE"], "name": "VAELoaderKJ", "display_name": "VAELoader KJ", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/vae", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ScheduledCFGGuidance": {"input": {"required": {"model": ["MODEL"], "positive": ["CONDITIONING"], 
"negative": ["CONDITIONING"], "cfg": ["FLOAT", {"default": 6.0, "min": 0.0, "max": 100.0, "step": 0.01}], "start_percent": ["FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}]}}, "input_order": {"required": ["model", "positive", "negative", "cfg", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["GUIDER"], "output_is_list": [false], "output_name": ["GUIDER"], "name": "ScheduledCFGGuidance", "display_name": "Scheduled CFG Guidance", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ApplyRifleXRoPE_HunuyanVideo": {"input": {"required": {"model": ["MODEL"], "latent": ["LATENT", {"tooltip": "Only used to get the latent count"}], "k": ["INT", {"default": 4, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}]}}, "input_order": {"required": ["model", "latent", "k"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ApplyRifleXRoPE_HunuyanVideo", "display_name": "Apply RifleXRoPE HunuyanVideo", "description": "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ApplyRifleXRoPE_WanVideo": {"input": {"required": {"model": ["MODEL"], "latent": ["LATENT", {"tooltip": "Only used to get the latent count"}], "k": ["INT", {"default": 6, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}]}}, "input_order": {"required": ["model", "latent", "k"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ApplyRifleXRoPE_WanVideo", "display_name": 
"Apply RifleXRoPE WanVideo", "description": "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "WanVideoTeaCacheKJ": {"input": {"required": {"model": ["MODEL"], "rel_l1_thresh": ["FLOAT", {"default": 0.275, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Threshold for to determine when to apply the cache, compromise between speed and accuracy. When using coefficients a good value range is something between 0.2-0.4 for all but 1.3B model, which should be about 10 times smaller, same as when not using coefficients."}], "start_percent": ["FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of the steps to use with TeaCache."}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of the steps to use with TeaCache."}], "cache_device": [["main_device", "offload_device"], {"default": "offload_device", "tooltip": "Device to cache to"}], "coefficients": [["disabled", "1.3B", "14B", "i2v_480", "i2v_720"], {"default": "i2v_480", "tooltip": "Coefficients for rescaling the relative l1 distance, if disabled the threshold value should be about 10 times smaller than the value used with coefficients."}]}}, "input_order": {"required": ["model", "rel_l1_thresh", "start_percent", "end_percent", "cache_device", "coefficients"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "WanVideoTeaCacheKJ", "display_name": "WanVideo Tea Cache (native)", "description": "\nPatch WanVideo model to use TeaCache. Speeds up inference by caching the output and \napplying it instead of doing the step. Best results are achieved by choosing the \nappropriate coefficients for the model. 
Early steps should never be skipped, with too \naggressive values this can happen and the motion suffers. Starting later can help with that too. \nWhen NOT using coefficients, the threshold value should be \nabout 10 times smaller than the value used with coefficients. \n\nOfficial recommended values https://github.com/ali-vilab/TeaCache/tree/main/TeaCache4Wan2.1:\n\n\n
\n+-------------------+--------+---------+--------+\n|       Model       |  Low   | Medium  |  High  |\n+-------------------+--------+---------+--------+\n| Wan2.1 t2v 1.3B  |  0.05  |  0.07   |  0.08  |\n| Wan2.1 t2v 14B   |  0.14  |  0.15   |  0.20  |\n| Wan2.1 i2v 480P  |  0.13  |  0.19   |  0.26  |\n| Wan2.1 i2v 720P  |  0.18  |  0.20   |  0.30  |\n+-------------------+--------+---------+--------+\n
\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/teacache", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "WanVideoEnhanceAVideoKJ": {"input": {"required": {"model": ["MODEL"], "latent": ["LATENT", {"tooltip": "Only used to get the latent count"}], "weight": ["FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Strength of the enhance effect"}]}}, "input_order": {"required": ["model", "latent", "weight"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "WanVideoEnhanceAVideoKJ", "display_name": "WanVideo Enhance A Video (native)", "description": "https://github.com/NUS-HPC-AI-Lab/Enhance-A-Video", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "SkipLayerGuidanceWanVideo": {"input": {"required": {"model": ["MODEL"], "blocks": ["STRING", {"default": "10", "multiline": false}], "start_percent": ["FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}], "end_percent": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}]}}, "input_order": {"required": ["model", "blocks", "start_percent", "end_percent"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "SkipLayerGuidanceWanVideo", "display_name": "Skip Layer Guidance WanVideo", "description": "Simplified skip layer guidance that only skips the uncond on selected blocks", "python_module": "custom_nodes.comfyui-kjnodes", "category": "advanced/guidance", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "TimerNodeKJ": {"input": {"required": {"any_input": ["*"], "mode": [["start", "stop"]], "name": ["STRING", {"default": "Timer"}]}, "optional": {"timer": ["TIMER"]}}, "input_order": {"required": 
["any_input", "mode", "name"], "optional": ["timer"]}, "is_input_list": false, "output": ["*", "TIMER", "INT"], "output_is_list": [false, false, false], "output_name": ["any_output", "timer", "time"], "name": "TimerNodeKJ", "display_name": "Timer Node KJ", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "HunyuanVideoEncodeKeyframesToCond": {"input": {"required": {"model": ["MODEL"], "positive": ["CONDITIONING"], "vae": ["VAE"], "start_frame": ["IMAGE"], "end_frame": ["IMAGE"], "num_frames": ["INT", {"default": 33, "min": 2, "max": 4096, "step": 1}], "tile_size": ["INT", {"default": 512, "min": 64, "max": 4096, "step": 64}], "overlap": ["INT", {"default": 64, "min": 0, "max": 4096, "step": 32}], "temporal_size": ["INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to encode at a time."}], "temporal_overlap": ["INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap."}]}, "optional": {"negative": ["CONDITIONING"]}}, "input_order": {"required": ["model", "positive", "vae", "start_frame", "end_frame", "num_frames", "tile_size", "overlap", "temporal_size", "temporal_overlap"], "optional": ["negative"]}, "is_input_list": false, "output": ["MODEL", "CONDITIONING", "CONDITIONING", "LATENT"], "output_is_list": [false, false, false, false], "output_name": ["model", "positive", "negative", "latent"], "name": "HunyuanVideoEncodeKeyframesToCond", "display_name": "HunyuanVideo Encode Keyframes To Cond", "description": "", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/videomodels", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CFGZeroStarAndInit": {"input": {"required": {"model": ["MODEL"], "use_zero_init": ["BOOLEAN", {"default": true}], "zero_init_steps": ["INT", 
{"default": 0, "min": 0, "tooltip": "for zero init, starts from 0 so first step is always zeroed out if use_zero_init enabled"}]}}, "input_order": {"required": ["model", "use_zero_init", "zero_init_steps"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "CFGZeroStarAndInit", "display_name": "CFG Zero Star/Init", "description": "https://github.com/WeichenFan/CFG-Zero-star", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "ModelPatchTorchSettings": {"input": {"required": {"model": ["MODEL"], "enable_fp16_accumulation": ["BOOLEAN", {"default": false, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}]}}, "input_order": {"required": ["model", "enable_fp16_accumulation"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "ModelPatchTorchSettings", "display_name": "Model Patch Torch Settings", "description": "Adds callbacks to model to set torch settings before and after running the model.", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "WanVideoNAG": {"input": {"required": {"model": ["MODEL"], "conditioning": ["CONDITIONING"], "nag_scale": ["FLOAT", {"default": 11.0, "min": 0.0, "max": 100.0, "step": 0.001, "tooltip": "Strength of negative guidance effect"}], "nag_alpha": ["FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Mixing coefficient that controls the balance between the normalized guided representation and the original positive representation."}], "nag_tau": ["FLOAT", {"default": 2.5, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Clipping threshold that controls how much the guided 
attention can deviate from the positive attention."}]}}, "input_order": {"required": ["model", "conditioning", "nag_scale", "nag_alpha", "nag_tau"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["model"], "name": "WanVideoNAG", "display_name": "WanVideoNAG", "description": "https://github.com/ChenDarYen/Normalized-Attention-Guidance", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/experimental", "output_node": false, "has_intermediate_output": false, "experimental": true, "search_aliases": []}, "CreateInstanceDiffusionTracking": {"input": {"required": {"coordinates": ["STRING", {"forceInput": true}], "width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "bbox_width": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "bbox_height": ["INT", {"default": 512, "min": 16, "max": 4096, "step": 1}], "class_name": ["STRING", {"default": "class_name"}], "class_id": ["INT", {"default": 0, "min": 0, "max": 255, "step": 1}], "prompt": ["STRING", {"default": "prompt", "multiline": true}]}, "optional": {"size_multiplier": ["FLOAT", {"default": [1.0], "forceInput": true}], "fit_in_frame": ["BOOLEAN", {"default": true}]}}, "input_order": {"required": ["coordinates", "width", "height", "bbox_width", "bbox_height", "class_name", "class_id", "prompt"], "optional": ["size_multiplier", "fit_in_frame"]}, "is_input_list": false, "output": ["TRACKING", "STRING", "INT", "INT", "INT", "INT"], "output_is_list": [false, false, false, false, false, false], "output_name": ["tracking", "prompt", "width", "height", "bbox_width", "bbox_height"], "name": "CreateInstanceDiffusionTracking", "display_name": "CreateInstanceDiffusionTracking", "description": "\nCreates tracking data to be used with InstanceDiffusion: \nhttps://github.com/logtd/ComfyUI-InstanceDiffusion \n \nInstanceDiffusion prompt format: \n\"class_id.class_name\": 
\"prompt\", \nfor example: \n\"1.head\": \"((head))\", \n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/InstanceDiffusion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AppendInstanceDiffusionTracking": {"input": {"required": {"tracking_1": ["TRACKING", {"forceInput": true}], "tracking_2": ["TRACKING", {"forceInput": true}]}, "optional": {"prompt_1": ["STRING", {"default": "", "forceInput": true}], "prompt_2": ["STRING", {"default": "", "forceInput": true}]}}, "input_order": {"required": ["tracking_1", "tracking_2"], "optional": ["prompt_1", "prompt_2"]}, "is_input_list": false, "output": ["TRACKING", "STRING"], "output_is_list": [false, false], "output_name": ["tracking", "prompt"], "name": "AppendInstanceDiffusionTracking", "display_name": "AppendInstanceDiffusionTracking", "description": "\nAppends tracking data to be used with InstanceDiffusion: \nhttps://github.com/logtd/ComfyUI-InstanceDiffusion \n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/InstanceDiffusion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DrawInstanceDiffusionTracking": {"input": {"required": {"image": ["IMAGE"], "tracking": ["TRACKING", {"forceInput": true}], "box_line_width": ["INT", {"default": 2, "min": 1, "max": 10, "step": 1}], "draw_text": ["BOOLEAN", {"default": true}], "font": [["FreeMono.ttf", "FreeMonoBoldOblique.otf", "TTNorms-Black.otf"]], "font_size": ["INT", {"default": 20}]}}, "input_order": {"required": ["image", "tracking", "box_line_width", "draw_text", "font", "font_size"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["image"], "name": "DrawInstanceDiffusionTracking", "display_name": "DrawInstanceDiffusionTracking", "description": "\nDraws the tracking data from \nCreateInstanceDiffusionTracking -node.\n\n", "python_module": "custom_nodes.comfyui-kjnodes", "category": "KJNodes/InstanceDiffusion", 
"output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SleepNodeAny": {"input": {"required": {"interval": ["FLOAT", {"default": 0.0}]}, "optional": {"inputs": ["*", {"default": 0.0}]}}, "input_order": {"required": ["interval"], "optional": ["inputs"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "SleepNodeAny", "display_name": "SleepNode", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SleepNodeImage": {"input": {"required": {"interval": ["FLOAT", {"default": 0.0}], "image": ["*"]}}, "input_order": {"required": ["interval", "image"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "SleepNodeImage", "display_name": "Sleep (Image tunnel)", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ErrorNode": {"input": {"required": {"error_msg": ["STRING", {"default": "Error"}]}}, "input_order": {"required": ["error_msg"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ErrorNode", "display_name": "ErrorNode", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CurrentTimestamp": {"input": {"required": {"format_string": ["STRING", {"default": "", "display": "text", "comment": "Leave blank for raw timestamp, or use format directives like '%Y-%m-%d %H:%M:%S'"}]}}, "input_order": {"required": ["format_string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "CurrentTimestamp", "display_name": "Current Timestamp", "description": "", "python_module": 
"custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DebugComboInputNode": {"input": {"required": {"input1": [["0", "1", "2"], {"default": "0"}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "DebugComboInputNode", "display_name": "Debug Combo Input", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TextPreviewNode": {"input": {"required": {"text": ["*", {"default": "text", "type": "output"}]}}, "input_order": {"required": ["text"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "TextPreviewNode", "display_name": "Text Preview", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ParseExifNode": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ParseExifNode", "display_name": "Parse Exif", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Misc", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveImageCustomNode": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "subfolder_dir": ["STRING", {"default": ""}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "subfolder_dir"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SaveImageCustomNode", "display_name": "Save 
Image Custom Node", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SaveTextCustomNode": {"input": {"required": {"text": ["*"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "subfolder_dir": ["STRING", {"default": ""}], "filename": ["STRING", {"default": ""}]}}, "input_order": {"required": ["text", "filename_prefix", "subfolder_dir", "filename"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SaveTextCustomNode", "display_name": "Save Text Custom Node", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "text", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DumpTextJsonlNode": {"input": {"required": {"text": ["*"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "subfolder_dir": ["STRING", {"default": ""}], "filename": ["STRING", {"default": "dump.jsonl"}], "keyname": ["STRING", {"default": "text"}]}}, "input_order": {"required": ["text", "filename_prefix", "subfolder_dir", "filename", "keyname"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "DumpTextJsonlNode", "display_name": "Dump Text JSONL Node", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "text", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ConcatGridNode": {"input": {"required": {"images": ["IMAGE"], "direction": [["horizontal", "vertical", "square-like"], {"default": "horizontal"}], "match_method": [["resize", "pad"], {"default": "resize"}]}}, "input_order": {"required": ["images", "direction", "match_method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ConcatGridNode", "display_name": "Concat Grid (Batch to single grid)", "description": "", 
"python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConcatTwoImagesNode": {"input": {"required": {"imageA": ["IMAGE"], "imageB": ["IMAGE"], "direction": [["horizontal", "vertical"], {"default": "horizontal"}], "match_method": [["resize", "pad"], {"default": "resize"}]}}, "input_order": {"required": ["imageA", "imageB", "direction", "match_method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ConcatTwoImagesNode", "display_name": "Concat 2 Images to Grid", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SaveCustomJPGNode": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "subfolder_dir": ["STRING", {"default": ""}]}, "optional": {"quality": ["INT", {"default": 95}], "optimize": ["BOOLEAN", {"default": true}], "metadata_string": ["STRING", {"default": ""}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "subfolder_dir"], "optional": ["quality", "optimize", "metadata_string"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SaveCustomJPGNode", "display_name": "Save Custom JPG Node", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SaveImageWebpCustomNode": {"input": {"required": {"images": ["IMAGE"], "filename_prefix": ["STRING", {"default": "ComfyUI"}], "subfolder_dir": ["STRING", {"default": ""}]}, "optional": {"quality": ["INT", {"default": 100}], "lossless": ["BOOLEAN", {"default": false}], "compression": ["INT", {"default": 4}], 
"optimize": ["BOOLEAN", {"default": false}], "metadata_string": ["STRING", {"default": ""}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["images", "filename_prefix", "subfolder_dir"], "optional": ["quality", "lossless", "compression", "optimize", "metadata_string"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SaveImageWebpCustomNode", "display_name": "Save Image Webp Node", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ComposeRGBAImageFromMask": {"input": {"required": {"image": ["IMAGE"], "mask": ["MASK"], "invert": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["image", "mask", "invert"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ComposeRGBAImageFromMask", "display_name": "Compose RGBA Image From Mask", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeImageNode": {"input": {"required": {"image": ["IMAGE"], "width": ["INT", {"default": 512}], "height": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "width", "height", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeImageNode", "display_name": "Resize Image", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeImageResolution": {"input": {"required": {"image": ["IMAGE"], "resolution": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, 
"input_order": {"required": ["image", "resolution", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeImageResolution", "display_name": "Resize Image With Resolution", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeImageEnsuringMultiple": {"input": {"required": {"image": ["IMAGE"], "multiple": ["INT", {"default": 32}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "multiple", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeImageEnsuringMultiple", "display_name": "Resize Image Ensuring W/H Multiple", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeImageResolutionIfBigger": {"input": {"required": {"image": ["IMAGE"], "resolution": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "resolution", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeImageResolutionIfBigger", "display_name": "Resize Image With Resolution If Bigger", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeImageResolutionIfSmaller": {"input": {"required": {"image": ["IMAGE"], "resolution": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "resolution", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeImageResolutionIfSmaller", "display_name": "Resize 
Image With Resolution If Smaller", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Base64DecodeNode": {"input": {"required": {"base64_string": ["STRING"]}}, "input_order": {"required": ["base64_string"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "Base64DecodeNode", "display_name": "Base64 Decode to Image", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ImageFromURLNode": {"input": {"required": {"url": ["STRING"]}}, "input_order": {"required": ["url"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ImageFromURLNode", "display_name": "Download Image from URL", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Base64EncodeNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"quality": ["INT", {"default": 100}], "format": [["PNG", "WEBP", "JPG"], {"default": "PNG"}], "gzip_compress": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["image"], "optional": ["quality", "format", "gzip_compress"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "Base64EncodeNode", "display_name": "Image to Base64 Encode", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StringToBase64Node": {"input": {"required": {"string": ["STRING"]}, "optional": {"gzip_compress": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["string"], "optional": ["gzip_compress"]}, "is_input_list": false, 
"output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StringToBase64Node", "display_name": "String to Base64 Encode", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Base64ToStringNode": {"input": {"required": {"base64_string": ["STRING"]}}, "input_order": {"required": ["base64_string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "Base64ToStringNode", "display_name": "Base64 to String Decode", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "InvertImageNode": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "InvertImageNode", "display_name": "Invert Image", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeScaleImageNode": {"input": {"required": {"image": ["IMAGE"], "scale": ["INT", {"default": 2}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "scale", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeScaleImageNode", "display_name": "Resize Scale Image", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeShortestToNode": {"input": {"required": {"image": ["IMAGE"], "size": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "size", "method"]}, "is_input_list": 
false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeShortestToNode", "display_name": "Resize Shortest To", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ResizeLongestToNode": {"input": {"required": {"image": ["IMAGE"], "size": ["INT", {"default": 512}], "method": [["NEAREST", "LANCZOS", "BICUBIC"]]}}, "input_order": {"required": ["image", "size", "method"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ResizeLongestToNode", "display_name": "Resize Longest To", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertGreyscaleNode": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ConvertGreyscaleNode", "display_name": "Convert Greyscale", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RotateImageNode": {"input": {"required": {"image": ["IMAGE"], "angle": ["INT", {"default": 0}]}}, "input_order": {"required": ["image", "angle"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "RotateImageNode", "display_name": "Rotate Image", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "BrightnessNode": {"input": {"required": {"image": ["IMAGE"], "factor": ["FLOAT", {"default": 1.0}]}}, "input_order": {"required": ["image", "factor"]}, "is_input_list": false, "output": ["IMAGE"], 
"output_is_list": [false], "output_name": ["IMAGE"], "name": "BrightnessNode", "display_name": "Brightness", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ContrastNode": {"input": {"required": {"image": ["IMAGE"], "factor": ["FLOAT", {"default": 1.0}]}}, "input_order": {"required": ["image", "factor"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ContrastNode", "display_name": "Contrast", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SharpnessNode": {"input": {"required": {"image": ["IMAGE"], "factor": ["FLOAT", {"default": 1.0}]}}, "input_order": {"required": ["image", "factor"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SharpnessNode", "display_name": "Sharpness", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ColorNode": {"input": {"required": {"image": ["IMAGE"], "factor": ["FLOAT", {"default": 1.0}]}}, "input_order": {"required": ["image", "factor"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ColorNode", "display_name": "Color", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertRGBNode": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ConvertRGBNode", "display_name": "Convert RGB", "description": "", "python_module": 
"custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetImageInfoNode": {"input": {"required": {"image": ["IMAGE"]}}, "input_order": {"required": ["image"]}, "is_input_list": false, "output": ["WIDTH", "HEIGHT", "TOTAL_PIXELS"], "output_is_list": [false, false, false], "output_name": ["WIDTH", "HEIGHT", "TOTAL_PIXELS"], "name": "GetImageInfoNode", "display_name": "Get Image Info", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ThresholdNode": {"input": {"required": {"image": ["IMAGE"], "threshold": ["INT", {"default": 128}]}}, "input_order": {"required": ["image", "threshold"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "ThresholdNode", "display_name": "Threshold image with value", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateCompare": {"input": {"required": {"input1": ["*", {"default": 0}], "input2": ["*", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "LogicGateCompare", "display_name": "ABiggerThanB", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateInvertBasic": {"input": {"required": {"input1": ["*", {"default": 0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "LogicGateInvertBasic", "display_name": "Invert Basic", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": 
"Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateNegateValue": {"input": {"required": {"input1": ["*", {"default": 0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "LogicGateNegateValue", "display_name": "Negate Value", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateBitwiseShift": {"input": {"required": {"input1": ["INT", {"default": 0}], "input2": ["INT", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "LogicGateBitwiseShift", "display_name": "Bitwise Shift", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateBitwiseAnd": {"input": {"required": {"input1": ["INT", {"default": 0}], "input2": ["INT", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "LogicGateBitwiseAnd", "display_name": "Bitwise And", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateBitwiseOr": {"input": {"required": {"input1": ["INT", {"default": 0}], "input2": ["INT", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "LogicGateBitwiseOr", "display_name": "Bitwise Or", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": 
false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateBitwiseXor": {"input": {"required": {"input1": ["INT", {"default": 0}], "input2": ["INT", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "LogicGateBitwiseXor", "display_name": "Bitwise Xor", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateBitwiseNot": {"input": {"required": {"input1": ["INT", {"default": 0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "LogicGateBitwiseNot", "display_name": "Bitwise Not", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateCompareString": {"input": {"required": {"regex": ["STRING", {"default": ""}], "input2": ["STRING", {"default": ""}]}}, "input_order": {"required": ["regex", "input2"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "LogicGateCompareString", "display_name": "AContainsB(String)", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetLengthString": {"input": {"required": {"string": ["STRING", {"default": ""}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "GetLengthString", "display_name": "Length of String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, 
"search_aliases": []}, "StaticNumberInt": {"input": {"required": {"number": ["INT", {"default": 0}]}}, "input_order": {"required": ["number"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "StaticNumberInt", "display_name": "Static Number Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StaticNumberFloat": {"input": {"required": {"number": ["FLOAT", {"default": 0.0}]}}, "input_order": {"required": ["number"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "StaticNumberFloat", "display_name": "Static Number Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "StaticString": {"input": {"required": {"string": ["STRING", {"default": ""}]}}, "input_order": {"required": ["string"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "StaticString", "display_name": "Static String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateAnd": {"input": {"required": {"input1": ["*", {"default": 0.0}], "input2": ["*", {"default": 0.0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "LogicGateAnd", "display_name": "AAndBGate", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateOr": {"input": {"required": {"input1": ["*", {"default": 0}], "input2": ["*", {"default": 
0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "LogicGateOr", "display_name": "AOrBGate", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogicGateEither": {"input": {"required": {"condition": ["*", {"default": 0}], "input1": ["*", {"default": ""}], "input2": ["*", {"default": ""}]}}, "input_order": {"required": ["condition", "input1", "input2"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "LogicGateEither", "display_name": "ReturnAorBValue", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AddNode": {"input": {"required": {"input1": ["*", {"default": 0}], "input2": ["*", {"default": 0}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "AddNode", "display_name": "Add Values", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MergeString": {"input": {"required": {"input1": ["*", {"default": ""}], "input2": ["*", {"default": ""}]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "MergeString", "display_name": "Merge String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ReplaceString": {"input": {"required": {"String": ["STRING", {"default": ""}], "Regex": ["STRING", 
{"default": ""}], "ReplaceWith": ["STRING", {"default": ""}]}}, "input_order": {"required": ["String", "Regex", "ReplaceWith"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ReplaceString", "display_name": "Replace String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MemoryNode": {"input": {"required": {"input1": ["*", {"default": ""}], "flag": ["*", {"default": 0}]}}, "input_order": {"required": ["input1", "flag"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "MemoryNode", "display_name": "Memory String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SystemRandomFloat": {"input": {"required": {"min_val": ["FLOAT", {"default": 0.0, "min": -999999999, "max": 999999999.0, "step": 0.01, "display": "number"}], "max_val": ["FLOAT", {"default": 1.0, "min": -999999999, "max": 999999999.0, "step": 0.01, "display": "number"}], "precision": ["INT", {"default": 0, "min": 0, "max": 10, "step": 1, "display": "number"}]}}, "input_order": {"required": ["min_val", "max_val", "precision"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "SystemRandomFloat", "display_name": "System Random Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DimensionSelectorWithSeedNode": {"input": {"required": {"resolution": ["INT", {"default": 1024}], "min_ratio": ["FLOAT", {"default": 0.6}], "max_ratio": ["FLOAT", {"default": 1.6}], "multiples": ["INT", {"default": 32}], "seed": ["INT", {"default": 0}]}}, "input_order": {"required": 
["resolution", "min_ratio", "max_ratio", "multiples", "seed"]}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["INT", "INT"], "name": "DimensionSelectorWithSeedNode", "display_name": "Random Width/Height with Resolution", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SystemRandomInt": {"input": {"required": {"min_val": ["INT", {"default": 0, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1, "display": "number"}], "max_val": ["INT", {"default": 9223372036854775807, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["min_val", "max_val"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "SystemRandomInt", "display_name": "System Random Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SystemUUIDGenerator": {"input": {"required": {"length": ["INT", {"default": 36, "min": 1, "max": 36, "step": 1, "display": "number"}]}}, "input_order": {"required": ["length"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "SystemUUIDGenerator", "display_name": "UUID Generator", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "UniformRandomFloat": {"input": {"required": {"min_val": ["FLOAT", {"default": 0.0, "min": -999999999, "max": 999999999.0, "step": 0.02, "display": "number"}], "max_val": ["FLOAT", {"default": 1.0, "min": -999999999, "max": 999999999.0, "step": 0.02, "display": "number"}], "decimal_places": ["INT", {"default": 1, "min": 0, 
"max": 10, "step": 1, "display": "number"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["min_val", "max_val", "decimal_places", "seed"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "UniformRandomFloat", "display_name": "Uniform Random Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "TriangularRandomFloat": {"input": {"required": {"low": ["FLOAT", {"default": 0.0, "min": -999999999, "max": 999999999.0, "step": 0.02, "display": "number"}], "high": ["FLOAT", {"default": 1.0, "min": -999999999, "max": 999999999.0, "step": 0.02, "display": "number"}], "mode": ["FLOAT", {"default": 0.5, "min": -999999999, "max": 999999999.0, "step": 0.02, "display": "number"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["low", "high", "mode", "seed"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "TriangularRandomFloat", "display_name": "Triangular Random Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "WeightedRandomChoice": {"input": {"required": {"input_string": ["STRING", {"default": "apple|10$banana|1$orange|3", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "seed"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "WeightedRandomChoice", "display_name": 
"Weighted Random Choice", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "RandomGaussianFloat": {"input": {"required": {"mean": ["FLOAT", {"default": 0.0, "min": -999999999, "max": 999999999.0, "step": 0.01}], "std_dev": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 999999999.0, "step": 0.01}], "decimal_places": ["INT", {"default": 2, "min": 0, "max": 10, "step": 1}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["mean", "std_dev", "decimal_places", "seed"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "RandomGaussianFloat", "display_name": "Random Gaussian Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SystemRandomGaussianFloat": {"input": {"required": {"mean": ["FLOAT", {"default": 0.0, "min": -999999999, "max": 999999999.0, "step": 0.01}], "std_dev": ["FLOAT", {"default": 1.0, "min": 0.0, "max": 999999999.0, "step": 0.01}], "decimal_places": ["INT", {"default": 2, "min": 0, "max": 10, "step": 1}]}}, "input_order": {"required": ["mean", "std_dev", "decimal_places"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "SystemRandomGaussianFloat", "display_name": "System Random Gaussian Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ProbabilityGate": {"input": {"required": {"probability": ["FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1}]}}, "input_order": {"required": ["probability", 
"seed"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ProbabilityGate", "display_name": "Probability Gate", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "UniformRandomInt": {"input": {"required": {"min_val": ["INT", {"default": 0, "min": -999999999, "max": 999999999, "step": 1, "display": "number"}], "max_val": ["INT", {"default": 1, "min": -999999999, "max": 999999999, "step": 1, "display": "number"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["min_val", "max_val", "seed"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "UniformRandomInt", "display_name": "Uniform Random Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "UniformRandomChoice": {"input": {"required": {"input_string": ["STRING", {"default": "a$b$c", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "seed"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "UniformRandomChoice", "display_name": "Uniform Random Choice", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ManualChoiceString": {"input": {"required": {"input_string": ["STRING", {"default": "a$b$c", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "index": ["INT", 
{"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "index"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ManualChoiceString", "display_name": "Manual Choice String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ManualChoiceInt": {"input": {"required": {"input_string": ["STRING", {"default": "1$2$3", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "index": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "index"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "ManualChoiceInt", "display_name": "Manual Choice Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ManualChoiceFloat": {"input": {"required": {"input_string": ["STRING", {"default": "1.0$2.0$3.0", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "index": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "index"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "ManualChoiceFloat", "display_name": "Manual Choice Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RandomShuffleInt": {"input": {"required": {"input_string": ["STRING", {"default": "1$2$3", "display": 
"text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "seed"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "RandomShuffleInt", "display_name": "Random Shuffle Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "RandomShuffleFloat": {"input": {"required": {"input_string": ["STRING", {"default": "1.0$2.0$3.0", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "seed"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "RandomShuffleFloat", "display_name": "Random Shuffle Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "RandomShuffleString": {"input": {"required": {"input_string": ["STRING", {"default": "a$b$c", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "seed": ["INT", {"default": 0, "min": 0, "max": 9223372036854775807, "step": 1, "display": "number"}]}}, "input_order": {"required": ["input_string", "separator", "seed"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "RandomShuffleString", "display_name": "Random Shuffle String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, 
"CounterInteger": {"input": {"required": {"start": ["FLOAT", {"default": 0.0, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1.0, "display": "number"}]}, "optional": {"reset": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["start"], "optional": ["reset"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "CounterInteger", "display_name": "Counter Integer", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "CounterFloat": {"input": {"required": {"start": ["FLOAT", {"default": 0.0, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1.0, "display": "number"}]}, "optional": {"reset": "BOOLEAN", "step": ["FLOAT", {"default": 1.0, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1.0, "display": "number"}]}}, "input_order": {"required": ["start"], "optional": ["reset", "step"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "CounterFloat", "display_name": "Counter Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "YieldableIteratorString": {"input": {"required": {"input_string": ["STRING", {"default": "a$b$c", "display": "text"}], "separator": ["STRING", {"default": "$", "display": "text"}], "reset": "BOOLEAN"}}, "input_order": {"required": ["input_string", "separator", "reset"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "YieldableIteratorString", "display_name": "Yieldable Iterator String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, 
"YieldableIteratorInt": {"input": {"required": {"start": ["INT", {"default": 0, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1, "display": "number"}], "end": ["INT", {"default": 10, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1, "display": "number"}], "step": ["INT", {"default": 1, "min": -9223372036854775807, "max": 9223372036854775807, "step": 1, "display": "number"}], "reset": "BOOLEAN"}}, "input_order": {"required": ["start", "end", "step", "reset"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "YieldableIteratorInt", "display_name": "Yieldable (Sequential) Iterator Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "StringListToCombo": {"input": {"required": {"string": ["STRING", {"default": ""}], "separator": ["STRING", {"default": "$"}]}, "optional": {"index": ["INT", {"default": 0}]}}, "input_order": {"required": ["string", "separator"], "optional": ["index"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "StringListToCombo", "display_name": "String List to Combo", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertComboToString": {"input": {"required": {"combo": ["*", {"default": []}], "separator": ["STRING", {"default": "$"}]}}, "input_order": {"required": ["combo", "separator"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ConvertComboToString", "display_name": "Convert Combo to String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Logic Gates", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertAny2Int": 
{"input": {"required": {"input1": ["*", {"default": 0.0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "ConvertAny2Int", "display_name": "Convert to Int", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Conversion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertAny2Float": {"input": {"required": {"input1": ["*", {"default": 0.0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "ConvertAny2Float", "display_name": "Convert to Float", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Conversion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertAny2Boolean": {"input": {"required": {"input1": ["*", {"default": 0.0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "ConvertAny2Boolean", "display_name": "Convert to Boolean", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Conversion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ConvertAny2String": {"input": {"required": {"input1": ["*", {"default": 0.0}]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "ConvertAny2String", "display_name": "Convert to String", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Conversion", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MinNode": {"input": {"required": {"input1": ["*"], "input2": ["*"]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["*"], 
"output_is_list": [false], "output_name": ["*"], "name": "MinNode", "display_name": "Min", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MaxNode": {"input": {"required": {"input1": ["*"], "input2": ["*"]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "MaxNode", "display_name": "Max", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RoundNode": {"input": {"required": {"input1": ["*"]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "RoundNode", "display_name": "Round", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "AbsNode": {"input": {"required": {"input1": ["*"]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "AbsNode", "display_name": "Abs", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FloorNode": {"input": {"required": {"input1": ["*"]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "FloorNode", "display_name": "Floor", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CeilNode": {"input": {"required": {"input1": ["*"]}}, "input_order": {"required": ["input1"]}, 
"is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["INT"], "name": "CeilNode", "display_name": "Ceil", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "PowerNode": {"input": {"required": {"input1": ["*"], "power": ["*"]}}, "input_order": {"required": ["input1", "power"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "PowerNode", "display_name": "Power", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SigmoidNode": {"input": {"required": {"input1": ["FLOAT"]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "SigmoidNode", "display_name": "Sigmoid", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "IsPrimeNode": {"input": {"required": {"value": ["INT", {"default": 1, "min": -9999999999, "max": 9999999999, "step": 1}]}, "optional": {"threshold": ["INT", {"default": 10000000, "min": 1, "max": 9999999999, "step": 1}], "miller_rabin_rounds": ["INT", {"default": 5, "min": 1, "max": 50, "step": 1}]}}, "input_order": {"required": ["value"], "optional": ["threshold", "miller_rabin_rounds"]}, "is_input_list": false, "output": ["BOOLEAN"], "output_is_list": [false], "output_name": ["BOOLEAN"], "name": "IsPrimeNode", "display_name": "Is Prime?", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "RAMPNode": {"input": {"required": {"input1": ["FLOAT"]}}, "input_order": {"required": ["input1"]}, "is_input_list": false, 
"output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "RAMPNode", "display_name": "RAMP", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LogNode": {"input": {"required": {"input1": ["FLOAT"], "base": ["FLOAT"]}}, "input_order": {"required": ["input1", "base"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "LogNode", "display_name": "Log", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "MultiplyNode": {"input": {"required": {"input1": ["*"], "input2": ["*"]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "MultiplyNode", "display_name": "Multiply", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DivideNode": {"input": {"required": {"input1": ["*"], "input2": ["*"]}}, "input_order": {"required": ["input1", "input2"]}, "is_input_list": false, "output": ["FLOAT"], "output_is_list": [false], "output_name": ["FLOAT"], "name": "DivideNode", "display_name": "Divide", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Math", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SDWebuiAPINode": {"input": {"required": {"prompt": ["STRING", {"default": ""}], "api_endpoint": ["STRING", {"default": ""}]}, "optional": {"auth": ["STRING", {"default": ""}], "seed": ["INT", {"default": -1}], "negative_prompt": ["STRING", {"default": ""}], "steps": ["INT", {"default": 28}], "width": ["INT", {"default": 1024}], "height": ["INT", {"default": 1024}], "hr_scale": ["FLOAT", 
{"default": 1.5}], "hr_upscale": ["STRING", {"default": "Latent"}], "enable_hr": ["BOOLEAN", {"default": false}], "cfg_scale": ["INT", {"default": 7}]}}, "input_order": {"required": ["prompt", "api_endpoint"], "optional": ["auth", "seed", "negative_prompt", "steps", "width", "height", "hr_scale", "hr_upscale", "enable_hr", "cfg_scale"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SDWebuiAPINode", "display_name": "Get Image From Prompt", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "WebUI API", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SDWebuiAPIFallbackNode": {"input": {"required": {"prompt": ["STRING", {"default": ""}], "api_endpoint": ["STRING", {"default": ""}]}, "optional": {"auth": ["STRING", {"default": ""}], "seed": ["INT", {"default": -1}], "negative_prompt": ["STRING", {"default": ""}], "steps": ["INT", {"default": 28}], "width": ["INT", {"default": 1024}], "height": ["INT", {"default": 1024}], "hr_scale": ["FLOAT", {"default": 1.5}], "hr_upscale": ["STRING", {"default": "Latent"}], "enable_hr": ["BOOLEAN", {"default": false}], "cfg_scale": ["INT", {"default": 7}]}}, "input_order": {"required": ["prompt", "api_endpoint"], "optional": ["auth", "seed", "negative_prompt", "steps", "width", "height", "hr_scale", "hr_upscale", "enable_hr", "cfg_scale"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "SDWebuiAPIFallbackNode", "display_name": "Get Image From Prompt (Fallback)", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "WebUI API", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetRatingNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": 
"EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetRatingNode", "display_name": "Get Rating Class", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetRatingFromTextNode": {"input": {"required": {"image": ["STRING", {"default": "/path/to/image.jpg"}]}, "optional": {"model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetRatingFromTextNode", "display_name": "Get Rating Class From Text", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "CensorImageByRating": {"input": {"required": {"image": ["IMAGE"], "rating_threshold": [["general", "sensitive", "questionable", "explicit"]], "censor_method": [["blur", "white", "pixelate"]]}, "optional": {"model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image", "rating_threshold", "censor_method"], "optional": ["model_name"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "CensorImageByRating", "display_name": "Censor Image by Rating", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "FilterTagsNode": {"input": {"required": {"tags": 
["STRING"], "filter_tags": ["STRING"]}, "optional": {"separator": ["STRING", {"default": ","}]}}, "input_order": {"required": ["tags", "filter_tags"], "optional": ["separator"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "FilterTagsNode", "display_name": "Filter Tags", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "safety", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetTagsAboveThresholdNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"threshold": ["FLOAT", {"default": 0.4}], "replace": ["BOOLEAN", {"default": false}], "model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["threshold", "replace", "model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetTagsAboveThresholdNode", "display_name": "Get Tags Above Threshold", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetTagsAboveThresholdFromTextNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"threshold": ["FLOAT", {"default": 0.4}], "replace": ["BOOLEAN", {"default": false}], "model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["threshold", "replace", "model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetTagsAboveThresholdFromTextNode", "display_name": "Get Tags Above Threshold From Text", "description": "", "python_module": "custom_nodes.comfyui-logicutils", 
"category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetCharactersAboveThresholdNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"threshold": ["FLOAT", {"default": 0.4}], "replace": ["BOOLEAN", {"default": false}], "model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["threshold", "replace", "model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetCharactersAboveThresholdNode", "display_name": "Get Chars Above Threshold", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetCharactersAboveThresholdFromTextNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"threshold": ["FLOAT", {"default": 0.4}], "replace": ["BOOLEAN", {"default": false}], "model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", "SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["threshold", "replace", "model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetCharactersAboveThresholdFromTextNode", "display_name": "Get Chars Above Threshold From Text", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GetAllTagsAboveThresholdNode": {"input": {"required": {"image": ["IMAGE"]}, "optional": {"threshold": ["FLOAT", {"default": 0.4}], "replace": ["BOOLEAN", {"default": false}], "model_name": [["EVA02_Large", "ViT_Large", "SwinV2", "ConvNext", "ConvNextV2", "ViT", "MOAT", 
"SwinV2_v3", "ConvNext_v3", "ViT_v3"], {"default": "EVA02_Large"}]}}, "input_order": {"required": ["image"], "optional": ["threshold", "replace", "model_name"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GetAllTagsAboveThresholdNode", "display_name": "Get All Tags Above Threshold", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "tagger", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SecureBase64Encrypt": {"input": {"required": {"images": ["IMAGE"], "public_key_pem": ["STRING", {"multiline": true, "default": ""}]}}, "input_order": {"required": ["images", "public_key_pem"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["encrypted_base64"], "name": "SecureBase64Encrypt", "display_name": "Secure Base64 Encrypt", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SecureWebPDecrypt": {"input": {"required": {"encrypted_base64": ["STRING", {"multiline": true, "default": ""}], "private_key_pem": ["STRING", {"multiline": true, "default": ""}]}}, "input_order": {"required": ["encrypted_base64", "private_key_pem"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["Decrypted_Image"], "name": "SecureWebPDecrypt", "display_name": "Secure WebP Decrypt", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "image", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "JsonParseNode": {"input": {"required": {"json_string": ["STRING", {"default": "{\"key\": \"value\"}"}]}}, "input_order": {"required": ["json_string"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "JsonParseNode", "display_name": "Pyobjects/JSON -> PyObject", "description": 
"", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "JsonDumpNode": {"input": {"required": {"py_obj": ["*"]}, "optional": {"indent": ["INT", {"default": 0}]}}, "input_order": {"required": ["py_obj"], "optional": ["indent"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "JsonDumpNode", "display_name": "Pyobjects/PyObject -> JSON", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "JsonDumpAnyStructureNode": {"input": {"required": {"py_obj": ["*"]}, "optional": {"indent": ["INT", {"default": 0}]}}, "input_order": {"required": ["py_obj"], "optional": ["indent"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "JsonDumpAnyStructureNode", "display_name": "Pyobjects/PyStructure -> JSON", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictCreateNode": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["DICT"], "output_is_list": [false], "output_name": ["DICT"], "name": "DictCreateNode", "display_name": "Pyobjects/Create Dict", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictSetNode": {"input": {"required": {"py_dict": ["DICT"], "key": ["STRING", {"default": "some_key"}], "value": ["*", {"default": "some_value"}]}}, "input_order": {"required": ["py_dict", "key", "value"]}, "is_input_list": false, "output": ["DICT"], "output_is_list": [false], "output_name": ["DICT"], "name": "DictSetNode", "display_name": "Pyobjects/Dict Set", "description": "", 
"python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictGetNode": {"input": {"required": {"py_dict": ["DICT"], "key": ["STRING", {"default": "some_key"}]}}, "input_order": {"required": ["py_dict", "key"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "DictGetNode", "display_name": "Pyobjects/Dict Get", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictRemoveKeyNode": {"input": {"required": {"py_dict": ["DICT"], "key": ["STRING", {"default": "some_key"}]}}, "input_order": {"required": ["py_dict", "key"]}, "is_input_list": false, "output": ["DICT"], "output_is_list": [false], "output_name": ["DICT"], "name": "DictRemoveKeyNode", "display_name": "Pyobjects/Dict Remove Key", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictMergeNode": {"input": {"required": {"dict_a": ["DICT"], "dict_b": ["DICT"]}, "optional": {"in_place": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["dict_a", "dict_b"], "optional": ["in_place"]}, "is_input_list": false, "output": ["DICT"], "output_is_list": [false], "output_name": ["DICT"], "name": "DictMergeNode", "display_name": "Pyobjects/Dict Merge", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictKeysNode": {"input": {"required": {"py_dict": ["DICT"]}}, "input_order": {"required": ["py_dict"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "DictKeysNode", "display_name": "Pyobjects/Dict Keys", "description": "", "python_module": 
"custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictValuesNode": {"input": {"required": {"py_dict": ["DICT"]}}, "input_order": {"required": ["py_dict"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "DictValuesNode", "display_name": "Pyobjects/Dict Values", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictItemsNode": {"input": {"required": {"py_dict": ["DICT"]}}, "input_order": {"required": ["py_dict"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "DictItemsNode", "display_name": "Pyobjects/Dict Items", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "DictPointer": {"input": {"required": {"py_dict": ["DICT"]}, "optional": {"reset": ["BOOLEAN"]}}, "input_order": {"required": ["py_dict"], "optional": ["reset"]}, "is_input_list": false, "output": ["DICT"], "output_is_list": [false], "output_name": ["DICT"], "name": "DictPointer", "display_name": "Pyobjects/Dict Pointer", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GlobalVarSetNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}], "value": ["*", {"default": "my_value"}]}}, "input_order": {"required": ["key", "value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "GlobalVarSetNode", "display_name": "Pyobjects/Global Var Set", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, 
"search_aliases": []}, "GlobalVarSetIfNotExistsNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}], "value": ["*", {"default": "my_value"}]}}, "input_order": {"required": ["key", "value"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "GlobalVarSetIfNotExistsNode", "display_name": "Pyobjects/Global Var Set If Not Exists", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GlobalVarGetNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}]}}, "input_order": {"required": ["key"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "GlobalVarGetNode", "display_name": "Pyobjects/Global Var Get", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GlobalVarRemoveNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}]}}, "input_order": {"required": ["key"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "GlobalVarRemoveNode", "display_name": "Pyobjects/Global Var Remove", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GlobalVarSaveNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}], "filepath": ["STRING", {"default": "my_global_var.json"}]}, "optional": {"allow_missing": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["key", "filepath"], "optional": ["allow_missing"]}, "is_input_list": false, "output": ["STRING"], "output_is_list": [false], "output_name": ["STRING"], "name": "GlobalVarSaveNode", "display_name": "Pyobjects/Global Var Save", "description": "", "python_module": 
"custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GlobalVarLoadNode": {"input": {"required": {"key": ["STRING", {"default": "my_key"}], "filepath": ["STRING", {"default": "my_global_var.json"}]}, "optional": {"allow_missing": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["key", "filepath"], "optional": ["allow_missing"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "GlobalVarLoadNode", "display_name": "Pyobjects/Global Var Load", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListCreateNode": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ListCreateNode", "display_name": "Pyobjects/Create List", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListAppendNode": {"input": {"required": {"py_list": ["LIST"], "item": ["*"]}}, "input_order": {"required": ["py_list", "item"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ListAppendNode", "display_name": "Pyobjects/List Append", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListGetNode": {"input": {"required": {"py_list": ["LIST"], "index": ["INT", {"default": 0}]}}, "input_order": {"required": ["py_list", "index"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "ListGetNode", "display_name": "Pyobjects/List Get", "description": "", "python_module": 
"custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListRemoveNode": {"input": {"required": {"py_list": ["LIST"], "item": ["*"]}}, "input_order": {"required": ["py_list", "item"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ListRemoveNode", "display_name": "Pyobjects/List Remove", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListPopNode": {"input": {"required": {"py_list": ["LIST"]}, "optional": {"index": ["INT", {"default": -1}]}}, "input_order": {"required": ["py_list"], "optional": ["index"]}, "is_input_list": false, "output": ["*", "LIST"], "output_is_list": [false, false], "output_name": ["*", "LIST"], "name": "ListPopNode", "display_name": "Pyobjects/List Pop", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListInsertNode": {"input": {"required": {"py_list": ["LIST"], "index": ["INT", {"default": 0}], "item": ["*"]}}, "input_order": {"required": ["py_list", "index", "item"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ListInsertNode", "display_name": "Pyobjects/List Insert", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ListExtendNode": {"input": {"required": {"list_a": ["LIST"], "list_b": ["LIST"]}}, "input_order": {"required": ["list_a", "list_b"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ListExtendNode", "display_name": "Pyobjects/List Extend", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": 
"Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ToListTypeNode": {"input": {"required": {"py_obj": ["*"]}}, "input_order": {"required": ["py_obj"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "ToListTypeNode", "display_name": "Pyobjects/Cast to LIST", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "ToSetTypeNode": {"input": {"required": {"py_obj": ["*"]}}, "input_order": {"required": ["py_obj"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "ToSetTypeNode", "display_name": "Pyobjects/Cast to SET", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetCreateNode": {"input": {"required": {}}, "input_order": {"required": []}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetCreateNode", "display_name": "Pyobjects/Create Set", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetAddNode": {"input": {"required": {"py_set": ["SET"], "item": ["*"]}}, "input_order": {"required": ["py_set", "item"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetAddNode", "display_name": "Pyobjects/Set Add", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetRemoveNode": {"input": {"required": {"py_set": ["SET"], "item": ["*"]}}, "input_order": {"required": ["py_set", "item"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], 
"output_name": ["SET"], "name": "SetRemoveNode", "display_name": "Pyobjects/Set Remove", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetUnionNode": {"input": {"required": {"py_set_a": ["SET"], "py_set_b": ["SET"]}}, "input_order": {"required": ["py_set_a", "py_set_b"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetUnionNode", "display_name": "Pyobjects/Set Union", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetIntersectionNode": {"input": {"required": {"py_set_a": ["SET"], "py_set_b": ["SET"]}}, "input_order": {"required": ["py_set_a", "py_set_b"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetIntersectionNode", "display_name": "Pyobjects/Set Intersection", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetDifferenceNode": {"input": {"required": {"py_set_a": ["SET"], "py_set_b": ["SET"]}}, "input_order": {"required": ["py_set_a", "py_set_b"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetDifferenceNode", "display_name": "Pyobjects/Set Difference", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetSymDifferenceNode": {"input": {"required": {"py_set_a": ["SET"], "py_set_b": ["SET"]}}, "input_order": {"required": ["py_set_a", "py_set_b"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetSymDifferenceNode", "display_name": "Pyobjects/Set 
Symmetric Difference", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetClearNode": {"input": {"required": {"py_set": ["SET"]}}, "input_order": {"required": ["py_set"]}, "is_input_list": false, "output": ["SET"], "output_is_list": [false], "output_name": ["SET"], "name": "SetClearNode", "display_name": "Pyobjects/Set Clear", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "SetToListNode": {"input": {"required": {"py_set": ["SET"]}}, "input_order": {"required": ["py_set"]}, "is_input_list": false, "output": ["LIST"], "output_is_list": [false], "output_name": ["LIST"], "name": "SetToListNode", "display_name": "Pyobjects/Set to List", "description": "", "python_module": "custom_nodes.comfyui-logicutils", "category": "Data", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "LoaderGGUF": {"input": {"required": {"gguf_name": [["InfiniteTalk/Wan2_1-InfiniteTalk_Single_Q8.gguf", "Wan2.1_T2V_14B_FusionX_VACE-Q4_K_S.gguf", "flux/fluxRealistic_ggufFluxRealistic.gguf", "gguf/Qwen_Image_Edit-Q5_1.gguf", "gguf/qwen-image-edit-2511-Q5_1.gguf", "wan/wan2.1-i2v-14b-480p-Q8_0.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q8_0.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q8_0.gguf"]]}}, "input_order": {"required": ["gguf_name"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], "output_name": ["MODEL"], "name": "LoaderGGUF", "display_name": "GGUF Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "ClipLoaderGGUF": {"input": {"required": {"clip_name": [["EVA02_CLIP_L_336_psz14_s6B.pt", 
"ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", 
"t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "type": [["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image"]]}, "optional": {"device": [["default", "cpu"], {"advanced": true}]}}, "input_order": {"required": ["clip_name", "type"], "optional": ["device"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "ClipLoaderGGUF", "display_name": "GGUF CLIP Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "DualClipLoaderGGUF": {"input": {"required": {"clip_name1": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", 
"llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name2": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", 
"clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", 
"umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "type": [["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15", "kandinsky5", "kandinsky5_image", "ltxv", "newbie", "ace"]]}, "optional": {"device": [["default", "cpu"], {"advanced": true}]}}, "input_order": {"required": ["clip_name1", "clip_name2", "type"], "optional": ["device"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "DualClipLoaderGGUF", "display_name": "GGUF DualCLIP Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "TripleClipLoaderGGUF": {"input": {"required": {"clip_name1": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", 
"oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name2": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", 
"google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name3": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", 
"byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", 
"t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]]}}, "input_order": {"required": ["clip_name1", "clip_name2", "clip_name3"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "TripleClipLoaderGGUF", "display_name": "GGUF TripleCLIP Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "QuadrupleClipLoaderGGUF": {"input": {"required": {"clip_name1": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", 
"open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name2": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", 
"llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name3": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", 
"clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", "sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", 
"umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]], "clip_name4": [["EVA02_CLIP_L_336_psz14_s6B.pt", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "ViT-L-14.pt", "byt5_small_glyphxl_fp16.safetensors", "clip-vit-large-patch14/model.safetensors", "clip_g.safetensors", "clip_g_hidream.safetensors", "clip_l.safetensors", "clip_l_hidream.safetensors", "gemma_2_2b_fp16.safetensors", "gemma_3_12B_it.safetensors", "gemma_3_12B_it_fp4_mixed.safetensors", "gemma_3_12B_it_fp8_scaled.safetensors", "gemma_3_12B_it_fpmixed.safetensors", "gemma_3_4b_it_bf16.safetensors", "ggufs/mmproj-F16.gguf", "ggufs/mmproj-Qwen2.5-VL-7B-Instruct-Q8_0.gguf", "google--siglip-so400m-patch14-384/model.safetensors", "jina_clip_v2_bf16.safetensors", "llama_3.1_8b_instruct_fp8_scaled.safetensors", "llava_llama3_fp16.safetensors", "llava_llama3_fp8_scaled.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "long_clip/ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "mistral_3_small_flux2_bf16.safetensors", "mistral_3_small_flux2_fp4_mixed.safetensors", "mistral_3_small_flux2_fp8.safetensors", "oldt5_xxl_fp16.safetensors", "oldt5_xxl_fp8_e4m3fn_scaled.safetensors", "open-clip-xlm-roberta-large-vit-huge-14_visual_fp16.safetensors", "ovis_2.5.safetensors", "qwen3.5_4b_bf16.safetensors", "qwen_0.6b_ace15.safetensors", "qwen_1.7b_ace15.safetensors", "qwen_2.5_vl_7b.safetensors", "qwen_2.5_vl_7b_fp8_scaled.safetensors", "qwen_2.5_vl_fp16.safetensors", "qwen_3_4b.safetensors", "qwen_3_4b_fp4_flux2.safetensors", "qwen_3_4b_fp4_mixed.safetensors", "qwen_3_4b_fp8_mixed.safetensors", "qwen_3_8b.safetensors", "qwen_3_8b_fp4mixed.safetensors", "qwen_3_8b_fp8mixed.safetensors", "qwen_4b_ace15.safetensors", "sa1/t5_base.safetensors", "sc/model.safetensors", 
"sd3m/clip_g.safetensors", "sd3m/clip_l.safetensors", "sd3m/t5xxl_fp16.safetensors", "sd3m/t5xxl_fp8_e4m3fn.safetensors", "siglip-so400m-patch14-384/model.safetensors", "t5/google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", "t5/t5xxl_fp16.safetensors", "t5/t5xxl_fp8_e4m3fn.safetensors", "t5/t5xxl_fp8_e4m3fn_scaled.safetensors", "t5xxl_fp16.safetensors", "t5xxl_fp8_e4m3fn.safetensors", "t5xxl_fp8_e4m3fn_scaled.safetensors", "umt5-xxl-enc-bf16.safetensors", "umt5-xxl-enc-fp8_e4m3fn.safetensors", "umt5_xxl_fp16.safetensors", "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "wan2_1/umt5-xxl-enc-bf16.safetensors", "wan2_1/umt5-xxl-enc-fp8_e4m3fn.safetensors"]]}}, "input_order": {"required": ["clip_name1", "clip_name2", "clip_name3", "clip_name4"]}, "is_input_list": false, "output": ["CLIP"], "output_is_list": [false], "output_name": ["CLIP"], "name": "QuadrupleClipLoaderGGUF", "display_name": "GGUF QuadrupleCLIP Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "LoaderGGUFAdvanced": {"input": {"required": {"gguf_name": [["InfiniteTalk/Wan2_1-InfiniteTalk_Single_Q8.gguf", "Wan2.1_T2V_14B_FusionX_VACE-Q4_K_S.gguf", "flux/fluxRealistic_ggufFluxRealistic.gguf", "gguf/Qwen_Image_Edit-Q5_1.gguf", "gguf/qwen-image-edit-2511-Q5_1.gguf", "wan/wan2.1-i2v-14b-480p-Q8_0.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q8_0.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q8_0.gguf"]], "dequant_dtype": [["default", "target", "float32", "float16", "bfloat16"], {"default": "default"}], "patch_dtype": [["default", "target", "float32", "float16", "bfloat16"], {"default": "default"}], "patch_on_device": ["BOOLEAN", {"default": false}]}}, "input_order": {"required": ["gguf_name", "dequant_dtype", "patch_dtype", "patch_on_device"]}, "is_input_list": false, "output": ["MODEL"], "output_is_list": [false], 
"output_name": ["MODEL"], "name": "LoaderGGUFAdvanced", "display_name": "GGUF Loader (Advanced)", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "VaeGGUF": {"input": {"required": {"vae_name": [["FLUX1/ae.safetensors", "SD1.5/openai_consistency_decoder/decoder.pt", "SD1.5/orangemix.vae.pt", "SD1.5/vae-ft-mse-840000-ema-pruned.safetensors", "SD2.1/kl-f8-anime2.ckpt", "SDXL/sdxl_vae.safetensors", "Stable-Cascade/effnet_encoder.safetensors", "Stable-Cascade/stage_a.safetensors", "Wan2.1_VAE.pth", "Wan2_1_FlashVSR_TCDecoder_fp32.safetensors", "Wan2_1_VAE_bf16.safetensors", "Wan2_1_VAE_fp32.safetensors", "ace_1.5_vae.safetensors", "ae.safetensors", "cosmos/cosmos_cv8x8x8_1.0.safetensors", "cosmos_cv8x8x8_1.0.safetensors", "flux/ae.safetensors", "flux/ae.sft", "flux2-vae.safetensors", "flux_vae.safetensors", "hunyuan/hunyuan_video_vae_bf16.safetensors", "hunyuan/hunyuan_video_vae_fp32.safetensors", "hunyuan3d-vae-v2-1/model.fp16.ckpt", "hunyuan_image_2.1_vae_fp16.safetensors", "hunyuan_image_refiner_vae_fp16.safetensors", "hunyuan_video_vae_bf16.safetensors", "hunyuanvideo15_vae_fp16.safetensors", "kl-f8-anime2.ckpt", "kolors/diffusion_pytorch_model.fp16.safetensors", "kolors/diffusion_pytorch_model.safetensors", "mochi_vae.safetensors", "pyramid_flow_vae_bf16.safetensors", "pyramid_flow_vae_fp32.safetensors", "qwen_image_layered_vae.safetensors", "qwen_image_vae.safetensors", "sc/stage_a.safetensors", "sd1/YOZORA.vae.pt", "sd1/clearvae_v23.safetensors", "sd1/color101VAE_v1.safetensors", "sd1/difconsistencyRAWVAE_v10.pt", "sd1/klF8Anime2VAE_klF8Anime2VAE.safetensors", "sd1/matrixVAE_v30.pt", "sd1/vae-ft-mse-840000-ema-pruned.safetensors", "sdxl/fixFP16ErrorsSDXLLowerMemoryUse_v10.safetensors", "sdxl/lastpiecexlVAE_baseonA0897.safetensors", "sdxl/sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.safetensors", "vaeFtMse840000EmaPruned_vaeFtMse840k.safetensors", 
"wan2.2_vae.safetensors", "wan2_1/Wan2_1_VAE_bf16.safetensors", "wan2_1/Wan2_1_VAE_fp32.safetensors", "wan_2.1_vae.safetensors", "wan_alpha_2.1_vae_alpha_channel.safetensors", "wan_alpha_2.1_vae_rgb_channel.safetensors", "taesd", "taesdxl", "taesd3", "taef1"]]}}, "input_order": {"required": ["vae_name"]}, "is_input_list": false, "output": ["VAE"], "output_is_list": [false], "output_name": ["VAE"], "name": "VaeGGUF", "display_name": "GGUF VAE Loader", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "GGUFUndo": {"input": {"required": {"select_gguf": [["InfiniteTalk/Wan2_1-InfiniteTalk_Single_Q8.gguf", "Wan2.1_T2V_14B_FusionX_VACE-Q4_K_S.gguf", "flux/fluxRealistic_ggufFluxRealistic.gguf", "gguf/Qwen_Image_Edit-Q5_1.gguf", "gguf/qwen-image-edit-2511-Q5_1.gguf", "wan/wan2.1-i2v-14b-480p-Q8_0.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_high_noise_14B_Q8_0.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q6_K.gguf", "wan2.2/wan2.2_t2v_low_noise_14B_Q8_0.gguf"]]}}, "input_order": {"required": ["select_gguf"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "GGUFUndo", "display_name": "GGUF Convertor (Reverse)", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GGUFSave": {"input": {"required": {"select_safetensors": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", 
"FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", 
"chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", 
"hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", 
"qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", 
"wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", 
"wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"]]}}, "input_order": {"required": ["select_safetensors"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "GGUFSave", "display_name": "GGUF Convertor (Alpha)", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "GGUFRun": {"input": {"required": {"select_safetensors": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", 
"FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", 
"firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", 
"hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", 
"sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", 
"wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", 
"z_image_turbo_nvfp4.safetensors"]]}}, "input_order": {"required": ["select_safetensors"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "GGUFRun", "display_name": "GGUF Convertor (Zero)", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "TENSORCut": {"input": {"required": {"select_safetensors": [["Chroma1-HD-fp8mixed.safetensors", "Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", 
"Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", "Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", 
"flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", "hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", 
"hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", "mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", 
"svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", 
"wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"]]}}, "input_order": {"required": ["select_safetensors"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "TENSORCut", "display_name": "TENSOR Cutter (Beta)", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "TENSORBoost": {"input": {"required": {"select_safetensors": [["Chroma1-HD-fp8mixed.safetensors", 
"Chroma1-HD.safetensors", "Ditto_models/ditto_global_comfy.safetensors", "Ditto_models/ditto_global_style_comfy.safetensors", "Ditto_models/ditto_sim2real_comfy.safetensors", "FLUX.1-Fill-dev/ae.safetensors", "FLUX.1-Fill-dev/flux1-fill-dev.safetensors", "FLUX.1-Fill-dev/text_encoder/model.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00001-of-00002.safetensors", "FLUX.1-Fill-dev/text_encoder_2/model-00002-of-00002.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00001-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00002-of-00003.safetensors", "FLUX.1-Fill-dev/transformer/diffusion_pytorch_model-00003-of-00003.safetensors", "FLUX.1-Fill-dev/vae/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/flux1-redux-dev.safetensors", "FLUX.1-Redux-dev/image_embedder/diffusion_pytorch_model.safetensors", "FLUX.1-Redux-dev/image_encoder/model.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "FLUX1/flux_dev_fp8_scaled_diffusion_model.safetensors", "FLUX2/flux2_dev_fp8mixed.safetensors", "FlashVSR/Wan2_1-T2V-1_3B_FlashVSR_fp32.safetensors", "FlashVSR/Wan2_1_FlashVSR_LQ_proj_model_bf16.safetensors", "IC-Light/iclight_sd15_fbc.safetensors", "InfiniteTalk/Wan2_1-InfiniTetalk-Single_fp16.safetensors", "InfiniteTalk/Wan2_1-InfiniteTalk-Single_fp8_e4m3fn_scaled_KJ.safetensors", "NewBie-Image-Exp0.1-bf16.safetensors", "Phantom-Wan-1_3B_fp16.safetensors", "STOIQOAfroditeFLUXXL_F1DAlpha.safetensors", "Wan2.1-Fun-1.3B-Control.safetensors", "Wan2.1_Fun_V1.1_1.3B_Control_Camera.safetensors", "Wan2.1_T2V_14B_FusionX_VACE-FP16.safetensors", "Wan2.2-Fun-A14B-Control/high_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/low_noise_model/diffusion_pytorch_model.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "Wan2.2-Fun-A14B-Control/wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "Wan2_1-I2V-14B-720P_fp8_e4m3fn.safetensors", 
"Wan2_1-I2V-14B-720P_fp8_e5m2.safetensors", "Wan2_1-I2V-ATI-14B_fp8_e4m3fn.safetensors", "Wan2_1-SkyReels-V2-DF-1_3B-540P_fp32.safetensors", "Wan2_1-T2V-14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_14B_bf16.safetensors", "Wan2_1-VACE_module_14B_fp8_e4m3fn.safetensors", "Wan2_1-VACE_module_1_3B_bf16.safetensors", "acestep_v1.5_base.safetensors", "acestep_v1.5_turbo.safetensors", "capybara_v0.1.safetensors", "chroma-radiance-x0.safetensors", "chrono_edit_14B_fp16.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "cosmos/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "cosmos_predict2/cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2/cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2/cosmos_predict2_2B_video2world_480p_16fps.safetensors", "firered_image_edit_1.0_bf16.safetensors", "flux-2-klein-4b.safetensors", "flux-2-klein-base-4b.safetensors", "flux.1-fill-dev-OneReward-transformer_bf16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp16.safetensors", "flux.1-fill-dev-OneReward-transformer_fp8.safetensors", "flux/flux1-canny-dev.safetensors", "flux/flux1-depth-dev.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-fill-dev.safetensors", "flux/flux1-redux-dev.safetensors", "flux/flux1-schnell-fp8.safetensors", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux1-canny-dev.safetensors", "flux1-depth-dev-nvfp4.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev-kontext_fp8_scaled.safetensors", "flux1-dev.safetensors", "flux1-fill-dev.safetensors", "flux1-krea-dev.safetensors", "flux1-krea-dev_fp8_scaled.safetensors", "flux1-schnell.safetensors", "flux2_dev_fp8mixed.safetensors", "fluxFillFP8_v10.safetensors", "hidream_e1_1_bf16.safetensors", "hidream_e1_full_bf16.safetensors", 
"hidream_i1_dev_bf16.safetensors", "hidream_i1_dev_fp8.safetensors", "hidream_i1_fast_bf16.safetensors", "hidream_i1_fast_fp8.safetensors", "hidream_i1_full_fp16.safetensors", "hidream_i1_full_fp8.safetensors", "humo_1.7B_fp16.safetensors", "humo_17B_fp16.safetensors", "humo_17B_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors", "hunyuan/hunyuan_video_FastVideo_720_fp8_e4m3fn.safetensors", "hunyuan_video_720_cfgdistill_bf16.safetensors", "hunyuan_video_image_to_video_720p_bf16.safetensors", "hunyuan_video_t2v_720p_bf16.safetensors", "hunyuan_video_v2_replace_image_to_video_720p_bf16.safetensors", "hunyuanimage2.1_bf16.safetensors", "hunyuanimage2.1_distilled_bf16.safetensors", "hunyuanimage2.1_distilled_fp8_e4m3fn.safetensors", "hunyuanimage2.1_fp8_e4m3fn.safetensors", "hunyuanimage2.1_refiner_bf16.safetensors", "hunyuanimage2.1_refiner_fp8_e4m3fn.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_1080p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_i2v_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_i2v_step_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_480p_t2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_480p_t2v_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_i2v_cfg_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_i2v_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp16.safetensors", "hunyuanvideo1.5_720p_sr_distilled_fp8_scaled.safetensors", "hunyuanvideo1.5_720p_t2v_fp16.safetensors", "longcat_image_bf16.safetensors", "lumina_2_model_bf16.safetensors", "mochi_preview_bf16.safetensors", 
"mochi_preview_fp8_scaled.safetensors", "omnigen2_fp16.safetensors", "ovis_image_bf16.safetensors", "pyramid_flow_miniflux_bf16_v1.safetensors", "pyramid_flow_miniflux_bf16_v2.safetensors", "pyramid_flow_miniflux_fp8_e4m3fn_v2.safetensors", "qwen_image_2512_bf16.safetensors", "qwen_image_2512_fp8_e4m3fn.safetensors", "qwen_image_bf16.safetensors", "qwen_image_edit_2509_bf16.safetensors", "qwen_image_edit_2509_fp8_e4m3fn.safetensors", "qwen_image_edit_2509_fp8mixed.safetensors", "qwen_image_edit_2511_bf16.safetensors", "qwen_image_edit_2511_fp8mixed.safetensors", "qwen_image_edit_bf16.safetensors", "qwen_image_edit_fp8_e4m3fn.safetensors", "qwen_image_fp8_e4m3fn.safetensors", "qwen_image_fp8_hq.safetensors", "qwen_image_fp8mixed.safetensors", "qwen_image_layered_bf16.safetensors", "qwen_image_layered_fp8mixed.safetensors", "qwen_image_nvfp4.safetensors", "rt_detr_v4-x-hgnet_fp16.safetensors", "rt_detr_v4-x-hgnet_fp32.safetensors", "sc/stage_b.safetensors", "sc/stage_b_bf16.safetensors", "sc/stage_b_lite.safetensors", "sc/stage_b_lite_bf16.safetensors", "sc/stage_c.safetensors", "sc/stage_c_bf16.safetensors", "sc/stage_c_lite.safetensors", "sc/stage_c_lite_bf16.safetensors", "sc/stage_c_pretrained.safetensors", "sd1/iclight_sd15_fbc.safetensors", "sd1/iclight_sd15_fbc_unet_ldm.safetensors", "sd1/iclight_sd15_fc.safetensors", "sd1/iclight_sd15_fc_unet_ldm.safetensors", "sd1/iclight_sd15_fcon.safetensors", "svdq-int4-flux.1-fill-dev/transformer_blocks.safetensors", "svdq-int4-flux.1-fill-dev/unquantized_layers.safetensors", "svdq-int4_r128-qwen-image-edit-2509.safetensors", "svdq-int4_r128-qwen-image-edit.safetensors", "svdq-int4_r32-qwen-image.safetensors", "wan/Wan2_1-I2V-14B-720p_fp8_e4m3fn_scaled_KJ.safetensors", "wan/aniWan2114BFp8E4m3fn_t2v.safetensors", "wan2.1/Phantom-Wan-14B_fp16.safetensors", "wan2.1/Wan2_1_kwai_recammaster_1_3B_step20000_bf16.safetensors", "wan2.1/wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1/wan2.1_t2v_1.3B_fp16.safetensors", 
"wan2.1_flf2v_720p_14B_fp16.safetensors", "wan2.1_flf2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors", "wan2.1_fun_camera_v1.1_14B_bf16.safetensors", "wan2.1_fun_control_1.3B_bf16.safetensors", "wan2.1_fun_inp_1.3B_bf16.safetensors", "wan2.1_i2v_480p_14B_bf16.safetensors", "wan2.1_i2v_480p_14B_fp16.safetensors", "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "wan2.1_i2v_720p_14B_bf16.safetensors", "wan2.1_i2v_720p_14B_fp16.safetensors", "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "wan2.1_magref_14B_fp16.safetensors", "wan2.1_t2v_1.3B_bf16.safetensors", "wan2.1_t2v_1.3B_fp16.safetensors", "wan2.1_t2v_14B_bf16.safetensors", "wan2.1_t2v_14B_fp16.safetensors", "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "wan2.1_t2v_14B_fp8_scaled.safetensors", "wan2.1_vace_1.3B_fp16.safetensors", "wan2.1_vace_1.3B_preview_fp16.safetensors", "wan2.1_vace_14B_fp16.safetensors", "wan2.2/Wan2_2-I2V-A14B-HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-I2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B-LOW_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2/Wan2_2-T2V-A14B_HIGH_fp8_e4m3fn_scaled_KJ.safetensors", "wan2.2_animate_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_bf16.safetensors", "wan2.2_fun_camera_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_camera_low_noise_14B_bf16.safetensors", "wan2.2_fun_camera_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_5B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_bf16.safetensors", "wan2.2_fun_control_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_control_low_noise_14B_bf16.safetensors", "wan2.2_fun_control_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_5B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_bf16.safetensors", "wan2.2_fun_inpaint_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_inpaint_low_noise_14B_bf16.safetensors", 
"wan2.2_fun_inpaint_low_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_high_noise_14B_bf16.safetensors", "wan2.2_fun_vace_high_noise_14B_fp8_scaled.safetensors", "wan2.2_fun_vace_low_noise_14B_bf16.safetensors", "wan2.2_fun_vace_low_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_high_noise_14B_fp16.safetensors", "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_i2v_low_noise_14B_fp16.safetensors", "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_s2v_14B_bf16.safetensors", "wan2.2_s2v_14B_fp8_scaled.safetensors", "wan2.2_t2v_high_noise_14B_fp16.safetensors", "wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors", "wan2.2_t2v_low_noise_14B_fp16.safetensors", "wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors", "wan2.2_ti2v_5B_fp16.safetensors", "wanAIWan21VideoModelSafetensors_kijaiWan21I2V14B480P.safetensors", "xl-inpaint-0.1/diffusion_pytorch_model.fp16.safetensors", "z_image_bf16.safetensors", "z_image_turbo_bf16.safetensors", "z_image_turbo_nvfp4.safetensors"]]}}, "input_order": {"required": ["select_safetensors"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "TENSORBoost", "display_name": "TENSOR Booster", "description": "", "python_module": "custom_nodes.gguf", "category": "gguf", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Context Big (rgthree)": {"input": {"required": {}, "optional": {"base_ctx": ["RGTHREE_CONTEXT"], "model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent": ["LATENT"], "images": ["IMAGE"], "seed": ["INT", {"forceInput": true}], "steps": ["INT", {"forceInput": true}], "step_refiner": ["INT", {"forceInput": true}], "cfg": ["FLOAT", {"forceInput": true}], "ckpt_name": [["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", 
"LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", 
"disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", 
"realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", 
"sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", 
"sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], {"forceInput": true}], "sampler": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], {"forceInput": true}], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], {"forceInput": true}], "clip_width": ["INT", {"forceInput": true}], "clip_height": ["INT", {"forceInput": true}], "text_pos_g": ["STRING", {"forceInput": true}], "text_pos_l": ["STRING", {"forceInput": true}], "text_neg_g": ["STRING", {"forceInput": true}], "text_neg_l": ["STRING", {"forceInput": true}], "mask": ["MASK"], "control_net": ["CONTROL_NET"]}, "hidden": {}}, "input_order": {"required": [], "optional": ["base_ctx", "model", "clip", "vae", "positive", "negative", "latent", "images", "seed", "steps", "step_refiner", "cfg", "ckpt_name", "sampler", "scheduler", "clip_width", "clip_height", 
"text_pos_g", "text_pos_l", "text_neg_g", "text_neg_l", "mask", "control_net"], "hidden": []}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT", "INT", "INT", "FLOAT", ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", 
"cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", 
"ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", 
"sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", 
"sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], "INT", "INT", "STRING", "STRING", "STRING", "STRING", "MASK", "CONTROL_NET"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": 
["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED", "STEPS", "STEP_REFINER", "CFG", "CKPT_NAME", "SAMPLER", "SCHEDULER", "CLIP_WIDTH", "CLIP_HEIGHT", "TEXT_POS_G", "TEXT_POS_L", "TEXT_NEG_G", "TEXT_NEG_L", "MASK", "CONTROL_NET"], "name": "Context Big (rgthree)", "display_name": "Context Big (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Context (rgthree)": {"input": {"required": {}, "optional": {"base_ctx": ["RGTHREE_CONTEXT"], "model": ["MODEL"], "clip": ["CLIP"], "vae": ["VAE"], "positive": ["CONDITIONING"], "negative": ["CONDITIONING"], "latent": ["LATENT"], "images": ["IMAGE"], "seed": ["INT", {"forceInput": true}]}, "hidden": {"version": "FLOAT"}}, "input_order": {"required": [], "optional": ["base_ctx", "model", "clip", "vae", "positive", "negative", "latent", "images", "seed"], "hidden": ["version"]}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED"], "name": "Context (rgthree)", "display_name": "Context (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Context Switch (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED"], "name": 
"Context Switch (rgthree)", "display_name": "Context Switch (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Context Switch Big (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT", "INT", "INT", "FLOAT", ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", "LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", 
"cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", "disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", 
"illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", "realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", 
"sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", "sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", 
"sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", "sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", 
"kl_optimal"], "INT", "INT", "STRING", "STRING", "STRING", "STRING", "MASK", "CONTROL_NET"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED", "STEPS", "STEP_REFINER", "CFG", "CKPT_NAME", "SAMPLER", "SCHEDULER", "CLIP_WIDTH", "CLIP_HEIGHT", "TEXT_POS_G", "TEXT_POS_L", "TEXT_NEG_G", "TEXT_NEG_L", "MASK", "CONTROL_NET"], "name": "Context Switch Big (rgthree)", "display_name": "Context Switch Big (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Context Merge (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT"], "output_is_list": [false, false, false, false, false, false, false, false, false], "output_name": ["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED"], "name": "Context Merge (rgthree)", "display_name": "Context Merge (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Context Merge Big (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["RGTHREE_CONTEXT", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "LATENT", "IMAGE", "INT", "INT", "INT", "FLOAT", ["3D_Anime.safetensors", "3D_Concept.safetensors", "512-inpainting-ema.safetensors", "FLUX1/flux1-dev-fp8.safetensors", "GTA5_Artwork_Diffusion_gtav_style.safetensors", "Juggernaut_X_RunDiffusion.safetensors", 
"LTXV/ltxv-13b-0.9.7-dev-fp8.safetensors", "MOHAWK_v20.safetensors", "MOHAWK_v20BackedVAE.safetensors", "RealESRGAN_x4plus.safetensors", "RealVisXL_V4.0.safetensors", "Realistic_Vision_V6.0_NV_B1.safetensors", "RunComfyDefault/v1-5-pruned-emaonly.safetensors", "SD1.5/v1-5-pruned-emaonly.ckpt", "SD2.1/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", "SDXL-TURBO/sd_xl_turbo_1.0_fp16.safetensors", "SDXL/sd_xl_base_1.0.safetensors", "SDXL/sd_xl_base_1.0_0.9vae.safetensors", "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors", "SUPIR/SUPIR-v0F.ckpt", "SVD/svd.safetensors", "SVD/svd_xt.safetensors", "TRELLIS-image-large/ckpts/slat_dec_gs_swin8_B_64l8gs32_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_mesh_swin8_B_64l8m256c_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_dec_rf_swin8_B_64l8r16_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16.safetensors", "TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_dec_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16.safetensors", "TRELLIS-image-large/ckpts/ss_flow_img_dit_L_16l8_fp16.safetensors", "TripoSR/model.ckpt", "ace_step_1.5_turbo_aio.safetensors", "ccsr/real-world_ccsr.ckpt", "cosmos_predict2_14B_t2i.safetensors", "cosmos_predict2_14B_video2world_480p_10fps.safetensors", "cosmos_predict2_14B_video2world_480p_16fps.safetensors", "cosmos_predict2_14B_video2world_720p_10fps.safetensors", "cosmos_predict2_14B_video2world_720p_16fps.safetensors", "cosmos_predict2_2B_t2i.safetensors", "cosmos_predict2_2B_video2world_480p_10fps.safetensors", "cosmos_predict2_2B_video2world_480p_16fps.safetensors", "cosmos_predict2_2B_video2world_720p_10fps.safetensors", "cosmos_predict2_2B_video2world_720p_16fps.safetensors", "cyberrealisticPony_v110.safetensors", "cyberrealisticPony_v8.safetensors", "cyberrealisticPony_v85.safetensors", "cyberrealisticPony_v90Alt1.safetensors", "cyberrealisticXL_v57.safetensors", 
"disneyPixarCartoon_v10.safetensors", "dreamCreationVirtual3DECommerce_v10.safetensors", "dreamshaperXL_v21TurboDPMSDE.safetensors", "dreamshaper_8.safetensors", "dynamicrafter/dynamicrafter_1024_v1.ckpt", "dynamicrafter/dynamicrafter_1024_v1_bf16.safetensors", "dynamicrafter/dynamicrafter_512_interp_v1.ckpt", "dynamicrafter/dynamicrafter_512_interp_v1_bf16.safetensors", "dynamicrafter/tooncrafter_512_interp-pruned-fp16.safetensors", "epicrealismXL_v8Kiss.safetensors", "epicrealismXL_vxivEyecandyRealism.safetensors", "epicrealismXL_vxviLastfameRealism.safetensors", "epicrealism_naturalSinRC1VAE.safetensors", "flat2DAnimerge_v45Sharp.safetensors", "flux/flux1-dev-bnb-nf4-v2.safetensors", "flux/flux1-dev-fp8.safetensors", "flux/flux1-dev.sft", "flux/flux1-schnell-fp8.safetensors", "flux/flux1-schnell.sft", "flux/fluxRealistic_fluxRealisticSamayV2.safetensors", "flux/kijai/flux1-dev-fp8.safetensors", "flux/lumiere_flux_alpha-fp8.safetensors", "flux1-dev-fp8.safetensors", "flux1-dev.safetensors", "flux1-redux-dev.safetensors", "flux1-schnell-fp8.safetensors", "flux1-schnell.safetensors", "hunyuan3d-dit-v2-mv-turbo_fp16.safetensors", "hunyuan3d-dit-v2-mv_fp16.safetensors", "hunyuan3d-dit-v2_fp16.safetensors", "hunyuan_3d_v2.1.safetensors", "illustriousXL_v01.safetensors", "ilustmix_v6.safetensors", "interiordesignsuperm_v2.safetensors", "juggernautXL_ragnarokBy.safetensors", "juggernautXL_versionXInpaint.safetensors", "leosamsFilmgirlUltra_ultraBaseModel.safetensors", "lotus-depth-d-v1-1.safetensors", "ltx/ltx-video-2b-v0.9.safetensors", "ltxv-13b-0.9.7-dev.safetensors", "majicmixRealistic_v7.safetensors", "meinamix_v12Final.safetensors", "noobaiCyberfix_eps11PerpCyberfix.safetensors", "noobaiXLNAIXL_vPred10Version.safetensors", "photon_v1.safetensors", "ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "ponyRealism_V22MainVAE.safetensors", "ponyRealism_V23.safetensors", "ponyRealism_V23ULTRA.safetensors", "realDream_sdxlPony15.safetensors", 
"realisticVisionV51_v51VAE.safetensors", "realisticVisionV60B1_v51HyperVAE.safetensors", "realisticVisionV60B1_v51VAE.safetensors", "realvisxlV50_v50LightningBakedvae.safetensors", "sa1/stable_audio_open_1.0.safetensors", "sc/stable_cascade_stage_b.safetensors", "sc/stable_cascade_stage_c.safetensors", "sd1/Deliberate_v2.safetensors", "sd1/absolutereality_lcm.safetensors", "sd1/animatrix_v20.safetensors", "sd1/animeStills_v1.safetensors", "sd1/animerge_v40.safetensors", "sd1/anythingV3_fp16.safetensors", "sd1/artUniverse_v80.safetensors", "sd1/arthemyComics_v70.safetensors", "sd1/cardosAnime_v20.safetensors", "sd1/chilloutmix_NiPrunedFp16Fix.safetensors", "sd1/chilloutmix_NiPrunedFp32Fix.safetensors", "sd1/comfyroll_v10Anime.safetensors", "sd1/comicDiffusion_v1.ckpt", "sd1/comicDiffusion_v2.ckpt", "sd1/crystalClear2_crystalClear2V1.safetensors", "sd1/cyberrealistic_v33.safetensors", "sd1/ddosmix_V2.safetensors", "sd1/dreamshaper_8.safetensors", "sd1/dreamshaper_8LCM.safetensors", "sd1/dreamshaper_8_inpainting.safetensors", "sd1/epicphotogasm_amateurreallife.safetensors", "sd1/epicrealism_naturalSinRC1VAE.safetensors", "sd1/f222_v1.ckpt", "sd1/flat2DAnimerge_v20.safetensors", "sd1/ghostmix_v20Bakedvae.safetensors", "sd1/helloyoung25d_V10f.safetensors", "sd1/helloyoung2d_V12g.safetensors", "sd1/icbinpICantBelieveIts_afterburn.safetensors", "sd1/icbinpICantBelieveIts_seco.safetensors", "sd1/icomix_V05.safetensors", "sd1/majicmixLux_v1.safetensors", "sd1/majicmixLux_v2.safetensors", "sd1/majicmixLux_v3.safetensors", "sd1/majicmixSombre_v20.safetensors", "sd1/meinamix_meinaV11.safetensors", "sd1/meinapastel_v6Pastel.safetensors", "sd1/meinaunreal_v41.safetensors", "sd1/paradigmLCM_v10.safetensors", "sd1/photographerAlpha7_v3.safetensors", "sd1/photonLCM_v10.safetensors", "sd1/photon_v1.safetensors", "sd1/realcartoon3d_v14.safetensors", "sd1/realdosmix_.safetensors", "sd1/realisian_v50.safetensors", "sd1/realisticVisionV60B1_v51VAE.safetensors", 
"sd1/redshiftDiffusion_v1.ckpt", "sd1/revAnimated_v122EOL.safetensors", "sd1/toonyou_beta6.safetensors", "sd1/universestable_v50Comics.safetensors", "sd1/vinteprotogenmix_V20.safetensors", "sd3.5_large_fp8_scaled.safetensors", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors", "sd35/sd3.5_large.safetensors", "sd35/sd3.5_large_fp8_scaled.safetensors", "sd35/sd3.5_large_turbo.safetensors", "sd3m/sd3_medium.safetensors", "sd3m/sd3_medium_incl_clips.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp16.safetensors", "sd3m/sd3_medium_incl_clips_t5xxlfp8.safetensors", "sd_xl_refiner_1.0_0.9vae.safetensors", "sdpose_wholebody_fp16.safetensors", "sdxl/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", "sdxl/albedobaseXL_v13.safetensors", "sdxl/bluepencilxl_v310.safetensors", "sdxl/breakdomainxl_V06d.safetensors", "sdxl/counterfeitxl_v10.safetensors", "sdxl/crystalClearXL_ccxl.safetensors", "sdxl/dreamshaperXL_lightningDPMSDE.safetensors", "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors", "sdxl/dreamshaperXL_v2TurboDPMSDE.safetensors", "sdxl/juggernautXL_v9Rdphoto2Lightning.safetensors", "sdxl/juggernautXL_v9Rundiffusionphoto2.safetensors", "sdxl/juggernautXL_version6Rundiffusion.safetensors", "sdxl/leosamsHelloworldXL_helloworldXL70.safetensors", "sdxl/moxieDiffusionXL_v18.safetensors", "sdxl/perfectdeliberate_v5.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.fp16.safetensors", "sdxl/playground-v2.5-1024px-aesthetic.safetensors", "sdxl/proteus_v03.safetensors", "sdxl/protovisionXLHighFidelity3D_releaseV660Bakedvae.safetensors", "sdxl/realcartoonXL_v6.safetensors", "sdxl/realitycheckXL_alpha11.safetensors", "sdxl/reproductionSDXL_2v12.safetensors", "sdxl/reproductionSDXL_v31.safetensors", "sdxl/revAnimated_v11.safetensors", "sdxl/sd_xl_base_1.0.safetensors", "sdxl/sd_xl_turbo_1.0.safetensors", "sdxl/sdxlUnstableDiffusers_v11.safetensors", "sdxl/tamarinXL_v10.safetensors", "sdxl/wildcardxXLTURBO_wildcardxXLTURBOV10.safetensors", 
"sigclip_vision_patch14_384.safetensors", "stable-audio-open-1.0.safetensors", "supir/SUPIR-v0F.ckpt", "supir/SUPIR-v0Q.ckpt", "svd/svd.safetensors", "svd/svd_xt.safetensors", "svd/svd_xt_1_1.safetensors", "upscale/x4-upscaler-ema.safetensors", "v1-5-pruned-emaonly-fp16.safetensors", "v1-5-pruned-emaonly.ckpt", "v1-5-pruned-emaonly.safetensors", "v2-1_768-ema-pruned.safetensors", "v2-1_768-nonema-pruned.safetensors", "wildcardxXLANIMATION_wildcardxXLANIMATION.safetensors"], ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"], "INT", "INT", "STRING", "STRING", "STRING", "STRING", "MASK", "CONTROL_NET"], "output_is_list": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "output_name": ["CONTEXT", "MODEL", "CLIP", "VAE", "POSITIVE", "NEGATIVE", "LATENT", "IMAGE", "SEED", "STEPS", "STEP_REFINER", "CFG", "CKPT_NAME", "SAMPLER", "SCHEDULER", "CLIP_WIDTH", "CLIP_HEIGHT", "TEXT_POS_G", "TEXT_POS_L", "TEXT_NEG_G", "TEXT_NEG_L", "MASK", "CONTROL_NET"], "name": "Context Merge Big (rgthree)", "display_name": "Context Merge Big (rgthree)", "description": "", "python_module": 
"custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Display Int (rgthree)": {"input": {"required": {"input": ["INT", {"forceInput": true}]}}, "input_order": {"required": ["input"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "Display Int (rgthree)", "display_name": "Display Int (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Display Any (rgthree)": {"input": {"required": {"source": ["*", {}]}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": ["source"], "hidden": ["unique_id", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "Display Any (rgthree)", "display_name": "Display Any (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": true, "has_intermediate_output": false, "search_aliases": []}, "Lora Loader Stack (rgthree)": {"input": {"required": {"model": ["MODEL"], "clip": ["CLIP"], "lora_01": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", 
"SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", 
"gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", 
"sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_01": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_02": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", 
"LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", 
"flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", 
"sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", 
"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_02": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_03": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", "bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", 
"flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", 
"kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", "sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", 
"sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_03": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}], "lora_04": [["None", "AnimateLCM/AnimateLCM_sd15_t2v_lora.safetensors", "Detailed_Hands-000001.safetensors", "Expressive_H-000001.safetensors", "FLUX.1-Turbo-Alpha.safetensors", "Flux2TurboComfyv2.safetensors", "Flux_2-Turbo-LoRA_comfyui.safetensors", "LineArt.safetensors", "OilPainting.safetensors", "PerfectEyesXL.safetensors", "Qwen-Edit-2509-Multiple-angles.safetensors", "Qwen-Image-Edit-2509-Anything2RealAlpha.safetensors", "Qwen-Image-Edit-2509-Fusion.safetensors", "Qwen-Image-Edit-2509-Light-Migration.safetensors", "Qwen-Image-Edit-2509-Relight.safetensors", "Qwen-Image-Edit-2509-White_to_Scene.safetensors", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors", "Qwen-Image-Lightning-4steps-V1.0.safetensors", "SDXL_StopMotion_lora.safetensors", "SU_Twrk_EP62.safetensors", "Samsung_UltraReal.safetensors", "UltraRealPhoto.safetensors", "WAN_dr34mj0b.safetensors", "Wan21_CausVid_14B_T2V_lora_rank32.safetensors", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32.safetensors", "add-detail-xl.safetensors", "add_detail.safetensors", "aidmaRealisticSkin-FLUX-v0.1.safetensors", "amateurphoto-v6-forcu.safetensors", 
"bounceV_01.safetensors", "chronoedit_distill_lora.safetensors", "comfyui_portrait_lora64.safetensors", "comfyui_subject_lora16.safetensors", "detailz-wan.safetensors", "diffusion_pytorch_model.safetensors", "flux.1-fill-dev-object-removal-lora.safetensors", "flux/In-Context-LoRA/couple-profile.safetensors", "flux/In-Context-LoRA/film-storyboard.safetensors", "flux/In-Context-LoRA/font-design.safetensors", "flux/In-Context-LoRA/home-decoration.safetensors", "flux/In-Context-LoRA/portrait-illustration.safetensors", "flux/In-Context-LoRA/portrait-photography.safetensors", "flux/In-Context-LoRA/ppt-templates.safetensors", "flux/In-Context-LoRA/sandstorm-visual-effect.safetensors", "flux/In-Context-LoRA/sparklers-visual-effect.safetensors", "flux/In-Context-LoRA/visual-identity-design.safetensors", "flux/UltraRealPhoto.safetensors", "flux/anime_lora.safetensors", "flux/anime_lora_comfy_converted.safetensors", "flux/art_lora.safetensors", "flux/art_lora_comfy_converted.safetensors", "flux/disney_lora.safetensors", "flux/disney_lora_comfy_converted.safetensors", "flux/flux.1-turbo-alpha/diffusion_pytorch_model.safetensors", "flux/furry_lora.safetensors", "flux/mjv6_lora.safetensors", "flux/mjv6_lora_comfy_converted.safetensors", "flux/realism_lora.safetensors", "flux/realism_lora_comfy_converted.safetensors", "flux/scenery_lora.safetensors", "flux/scenery_lora_comfy_converted.safetensors", "flux1-canny-dev-lora.safetensors", "flux1-depth-dev-lora.safetensors", "fluxRealSkin-V2.safetensors", "flux_realism_lora.safetensors", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16.safetensors", "gemma-3-12b-it-abliterated_lora_rank64_bf16.safetensors", "hunyuan/hyvideo_FastVideo_LoRA-fp8.safetensors", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16.safetensors", "insert-anything_extracted_lora_rank_64-bf16.safetensors", "kontext/Akira Toriyama - Dragon Ball Style - Kontext.safetensors", 
"kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer.safetensors", "kontext/GuoHuaKontextLoRA.safetensors", "kontext/HDR360.safetensors", "kontext/HXHY-RealisticKontextLoRA1.6.safetensors", "kontext/InScene-v1.0.safetensors", "kontext/Three-view sketch.safetensors", "kontext/fluffy-kontext-lora.safetensors", "kontext/ghibliStyle_kontext_byJaneB.safetensors", "kontext/glass-prism-kontext-lora.safetensors", "kontext/kontext-pixelize-v1.1_000001500.safetensors", "kontext/kontext_change_clothes_t1.safetensors", "kontext/mech-anything-kontext.safetensors", "kontext/unflux_v1.safetensors", "lcm/sd1/pytorch_lora_weights.safetensors", "lcm/sdxl/pytorch_lora_weights.safetensors", "ltx2-squish.safetensors", "more_details.safetensors", "qwen_image_union_diffsynth_lora.safetensors", "ral-dissolve-sdxl.safetensors", "refcontrol_pose.safetensors", "sd1/3232pixel.safetensors", "sd1/AoTStyle.safetensors", "sd1/AxelStone-DocStasis.safetensors", "sd1/DrippingArt.safetensors", "sd1/GTA_Style.safetensors", "sd1/LiquidAF-0-1.safetensors", "sd1/Longan.safetensors", "sd1/Magazine-10.safetensors", "sd1/Naruto.safetensors", "sd1/add_detail.safetensors", "sd1/anime_minimalist_v1-000020.safetensors", "sd1/animemix_v3_offset.safetensors", "sd1/animeoutlineV4_16.safetensors", "sd1/arcane_offset.safetensors", "sd1/concept_master_1.0.safetensors", "sd1/epi_noiseoffset2.pt", "sd1/ghibli_style_offset.safetensors", "sd1/ip-adapter-faceid-plus_sd15_lora.safetensors", "sd1/ip-adapter-faceid-plusv2_sd15_lora.safetensors", "sd1/ip-adapter-faceid_sd15_lora.safetensors", "sd1/modernlogo.safetensors", "sd1/more_details.safetensors", "sd1/neg4all_bdsqlsz_V3.5.safetensors", "sd1/sd15_lcm_lora.safetensors", "sd1/tangbohu-line_1.0.safetensors", "sd1/v3_sd15_adapter.ckpt", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", "sdxl/Aether_Fire_v1_SDXL_LoRA.safetensors", "sdxl/Aether_Ghost_v1.1_LoRA.safetensors", "sdxl/Aether_Ghost_v1_LoRA.safetensors", "sdxl/ClayAnimationRedm.safetensors", 
"sdxl/DreamARTSDXL.safetensors", "sdxl/MJ52.safetensors", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", "sdxl/ParchartXL-2.0.safetensors", "sdxl/PixelArtRedmond-Lite64.safetensors", "sdxl/StickersRedmond.safetensors", "sdxl/ToyRedmond-FnkRedmAF.safetensors", "sdxl/WowifierXL-V2.safetensors", "sdxl/add-detail-xl.safetensors", "sdxl/cereal_box_sdxl_v1.safetensors", "sdxl/crayons_v1_sdxl.safetensors", "sdxl/fofr/emoji.safetensors", "sdxl/ikea_instructions_xl_v1_5.safetensors", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora.safetensors", "sdxl/ip-adapter-faceid_sdxl_lora.safetensors", "sdxl/magik_smoke_v1.2.safetensors", "sdxl/neg4all_xl_bdsqlsz_V5.safetensors", "sdxl/sdxl_lightning_8step_lora.safetensors", "sdxl/sdxl_offset_example_v10.safetensors", "sdxl/wojak_SDXL.safetensors", "sdxl/xl_more_art-full_v1.safetensors", "uso-flux1-dit-lora-v1.safetensors", "wan2.1-1.3b-control-lora-tile-v1.0_comfy.safetensors", "wan2.2_animate_14B_relight_lora_bf16.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors", "wan_alpha_2.1_rgba_lora.safetensors", "z_image_turbo_distill_patch_lora_bf16.safetensors"]], "strength_04": ["FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}]}}, "input_order": {"required": ["model", "clip", "lora_01", "strength_01", "lora_02", "strength_02", "lora_03", "strength_03", "lora_04", "strength_04"]}, "is_input_list": false, "output": ["MODEL", "CLIP"], "output_is_list": [false, false], "output_name": ["MODEL", "CLIP"], "name": "Lora Loader Stack (rgthree)", "display_name": "Lora Loader Stack (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Seed (rgthree)": {"input": {"required": {"seed": ["INT", 
{"default": 0, "min": -1125899906842624, "max": 1125899906842624}]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"}}, "input_order": {"required": ["seed"], "hidden": ["prompt", "extra_pnginfo", "unique_id"]}, "is_input_list": false, "output": ["INT"], "output_is_list": [false], "output_name": ["SEED"], "name": "Seed (rgthree)", "display_name": "Seed (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Image Inset Crop (rgthree)": {"input": {"required": {"image": ["IMAGE"], "measurement": [["Pixels", "Percentage"]], "left": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "right": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "top": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}], "bottom": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 8}]}}, "input_order": {"required": ["image", "measurement", "left", "right", "top", "bottom"]}, "is_input_list": false, "output": ["IMAGE"], "output_is_list": [false], "output_name": ["IMAGE"], "name": "Image Inset Crop (rgthree)", "display_name": "Image Inset Crop (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Power Prompt (rgthree)": {"input": {"required": {"prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"opt_model": ["MODEL"], "opt_clip": ["CLIP"], "insert_lora": [["CHOOSE", "DISABLE LORAS", "AnimateLCM/AnimateLCM_sd15_t2v_lora", "Detailed_Hands-000001", "Expressive_H-000001", "FLUX.1-Turbo-Alpha", "Flux2TurboComfyv2", "Flux_2-Turbo-LoRA_comfyui", "LineArt", "OilPainting", "PerfectEyesXL", "Qwen-Edit-2509-Multiple-angles", "Qwen-Image-Edit-2509-Anything2RealAlpha", "Qwen-Image-Edit-2509-Fusion", "Qwen-Image-Edit-2509-Light-Migration", 
"Qwen-Image-Edit-2509-Relight", "Qwen-Image-Edit-2509-White_to_Scene", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16", "Qwen-Image-Lightning-4steps-V1.0", "SDXL_StopMotion_lora", "SU_Twrk_EP62", "Samsung_UltraReal", "UltraRealPhoto", "WAN_dr34mj0b", "Wan21_CausVid_14B_T2V_lora_rank32", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32", "add-detail-xl", "add_detail", "aidmaRealisticSkin-FLUX-v0.1", "amateurphoto-v6-forcu", "bounceV_01", "chronoedit_distill_lora", "comfyui_portrait_lora64", "comfyui_subject_lora16", "detailz-wan", "diffusion_pytorch_model", "flux.1-fill-dev-object-removal-lora", "flux/In-Context-LoRA/couple-profile", "flux/In-Context-LoRA/film-storyboard", "flux/In-Context-LoRA/font-design", "flux/In-Context-LoRA/home-decoration", "flux/In-Context-LoRA/portrait-illustration", "flux/In-Context-LoRA/portrait-photography", "flux/In-Context-LoRA/ppt-templates", "flux/In-Context-LoRA/sandstorm-visual-effect", "flux/In-Context-LoRA/sparklers-visual-effect", "flux/In-Context-LoRA/visual-identity-design", "flux/UltraRealPhoto", "flux/anime_lora", "flux/anime_lora_comfy_converted", "flux/art_lora", "flux/art_lora_comfy_converted", "flux/disney_lora", "flux/disney_lora_comfy_converted", "flux/flux.1-turbo-alpha/diffusion_pytorch_model", "flux/furry_lora", "flux/mjv6_lora", "flux/mjv6_lora_comfy_converted", "flux/realism_lora", "flux/realism_lora_comfy_converted", "flux/scenery_lora", "flux/scenery_lora_comfy_converted", "flux1-canny-dev-lora", "flux1-depth-dev-lora", "fluxRealSkin-V2", "flux_realism_lora", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16", "gemma-3-12b-it-abliterated_lora_rank64_bf16", "hunyuan/hyvideo_FastVideo_LoRA-fp8", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16", "insert-anything_extracted_lora_rank_64-bf16", "kontext/Akira Toriyama - Dragon Ball Style - Kontext", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer", "kontext/GuoHuaKontextLoRA", "kontext/HDR360", "kontext/HXHY-RealisticKontextLoRA1.6", 
"kontext/InScene-v1.0", "kontext/Three-view sketch", "kontext/fluffy-kontext-lora", "kontext/ghibliStyle_kontext_byJaneB", "kontext/glass-prism-kontext-lora", "kontext/kontext-pixelize-v1.1_000001500", "kontext/kontext_change_clothes_t1", "kontext/mech-anything-kontext", "kontext/unflux_v1", "lcm/sd1/pytorch_lora_weights", "lcm/sdxl/pytorch_lora_weights", "ltx2-squish", "more_details", "qwen_image_union_diffsynth_lora", "ral-dissolve-sdxl", "refcontrol_pose", "sd1/3232pixel", "sd1/AoTStyle", "sd1/AxelStone-DocStasis", "sd1/DrippingArt", "sd1/GTA_Style", "sd1/LiquidAF-0-1", "sd1/Longan", "sd1/Magazine-10", "sd1/Naruto", "sd1/add_detail", "sd1/anime_minimalist_v1-000020", "sd1/animemix_v3_offset", "sd1/animeoutlineV4_16", "sd1/arcane_offset", "sd1/concept_master_1.0", "sd1/epi_noiseoffset2", "sd1/ghibli_style_offset", "sd1/ip-adapter-faceid-plus_sd15_lora", "sd1/ip-adapter-faceid-plusv2_sd15_lora", "sd1/ip-adapter-faceid_sd15_lora", "sd1/modernlogo", "sd1/more_details", "sd1/neg4all_bdsqlsz_V3.5", "sd1/sd15_lcm_lora", "sd1/tangbohu-line_1.0", "sd1/v3_sd15_adapter", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF", "sdxl/Aether_Fire_v1_SDXL_LoRA", "sdxl/Aether_Ghost_v1.1_LoRA", "sdxl/Aether_Ghost_v1_LoRA", "sdxl/ClayAnimationRedm", "sdxl/DreamARTSDXL", "sdxl/MJ52", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics", "sdxl/ParchartXL-2.0", "sdxl/PixelArtRedmond-Lite64", "sdxl/StickersRedmond", "sdxl/ToyRedmond-FnkRedmAF", "sdxl/WowifierXL-V2", "sdxl/add-detail-xl", "sdxl/cereal_box_sdxl_v1", "sdxl/crayons_v1_sdxl", "sdxl/fofr/emoji", "sdxl/ikea_instructions_xl_v1_5", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora", "sdxl/ip-adapter-faceid_sdxl_lora", "sdxl/magik_smoke_v1.2", "sdxl/neg4all_xl_bdsqlsz_V5", "sdxl/sdxl_lightning_8step_lora", "sdxl/sdxl_offset_example_v10", "sdxl/wojak_SDXL", "sdxl/xl_more_art-full_v1", "uso-flux1-dit-lora-v1", "wan2.1-1.3b-control-lora-tile-v1.0_comfy", "wan2.2_animate_14B_relight_lora_bf16", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise", 
"wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise", "wan_alpha_2.1_rgba_lora", "z_image_turbo_distill_patch_lora_bf16"]], "insert_embedding": [["CHOOSE", "AS-YoungV2-neg", "AS-YoungV2", "BadDream", "By bad artist -neg", "ERA09NEGV2", "EasyNegativeV2", "FastNegativeV2", "GS-DeFeminize-neg", "GS-DeMasculate-neg", "GS-Girlish", "Style-GravityMagic", "bad-hands-5", "bad-picture-chill-75v", "badhandv4", "badpic", "easynegative", "epiCNegative", "negative_hand-neg", "ng_deepnegative_v1_75t", "nobg", "prettify", "prettyeyes", "style-rustmagic-neg", "style-rustmagic", "verybadimagenegative_v1.3"]], "insert_saved": [["CHOOSE"]]}, "hidden": {"values_insert_saved": [["CHOOSE"]]}}, "input_order": {"required": ["prompt"], "optional": ["opt_model", "opt_clip", "insert_lora", "insert_embedding", "insert_saved"], "hidden": ["values_insert_saved"]}, "is_input_list": false, "output": ["CONDITIONING", "MODEL", "CLIP", "STRING"], "output_is_list": [false, false, false, false], "output_name": ["CONDITIONING", "MODEL", "CLIP", "TEXT"], "name": "Power Prompt (rgthree)", "display_name": "Power Prompt (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Power Prompt - Simple (rgthree)": {"input": {"required": {"prompt": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"opt_clip": ["CLIP"], "insert_embedding": [["CHOOSE", "AS-YoungV2-neg", "AS-YoungV2", "BadDream", "By bad artist -neg", "ERA09NEGV2", "EasyNegativeV2", "FastNegativeV2", "GS-DeFeminize-neg", "GS-DeMasculate-neg", "GS-Girlish", "Style-GravityMagic", "bad-hands-5", "bad-picture-chill-75v", "badhandv4", "badpic", "easynegative", "epiCNegative", "negative_hand-neg", "ng_deepnegative_v1_75t", "nobg", "prettify", "prettyeyes", "style-rustmagic-neg", "style-rustmagic", 
"verybadimagenegative_v1.3"]], "insert_saved": [["CHOOSE"]]}, "hidden": {"values_insert_saved": [["CHOOSE"]]}}, "input_order": {"required": ["prompt"], "optional": ["opt_clip", "insert_embedding", "insert_saved"], "hidden": ["values_insert_saved"]}, "is_input_list": false, "output": ["CONDITIONING", "STRING"], "output_is_list": [false, false], "output_name": ["CONDITIONING", "TEXT"], "name": "Power Prompt - Simple (rgthree)", "display_name": "Power Prompt - Simple (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "KSampler Config (rgthree)": {"input": {"required": {"steps_total": ["INT", {"default": 30, "min": 1, "max": 16384, "step": 1}], "refiner_step": ["INT", {"default": 24, "min": 1, "max": 16384, "step": 1}], "cfg": ["FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.5}], "sampler_name": [["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"]], "scheduler": [["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]]}}, "input_order": {"required": ["steps_total", "refiner_step", "cfg", "sampler_name", "scheduler"]}, "is_input_list": false, "output": ["INT", "INT", "FLOAT", ["euler", "euler_cfg_pp", 
"euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "gradient_estimation_cfg_pp", "er_sde", "seeds_2", "seeds_3", "sa_solver", "sa_solver_pece", "ddim", "uni_pc", "uni_pc_bh2"], ["simple", "sgm_uniform", "karras", "exponential", "ddim_uniform", "beta", "normal", "linear_quadratic", "kl_optimal"]], "output_is_list": [false, false, false, false, false], "output_name": ["STEPS", "REFINER_STEP", "CFG", "SAMPLER", "SCHEDULER"], "name": "KSampler Config (rgthree)", "display_name": "KSampler Config (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SDXL Empty Latent Image (rgthree)": {"input": {"required": {"dimensions": [["1536 x 640 (landscape)", "1344 x 768 (landscape)", "1216 x 832 (landscape)", "1152 x 896 (landscape)", "1024 x 1024 (square)", " 896 x 1152 (portrait)", " 832 x 1216 (portrait)", " 768 x 1344 (portrait)", " 640 x 1536 (portrait)"], {"default": "1024 x 1024 (square)"}], "clip_scale": ["FLOAT", {"default": 2.0, "min": 1.0, "max": 10.0, "step": 0.5}], "batch_size": ["INT", {"default": 1, "min": 1, "max": 64}]}}, "input_order": {"required": ["dimensions", "clip_scale", "batch_size"]}, "is_input_list": false, "output": ["LATENT", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["LATENT", "CLIP_WIDTH", "CLIP_HEIGHT"], "name": "SDXL Empty Latent Image (rgthree)", "display_name": "SDXL Empty Latent Image 
(rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SDXL Power Prompt - Positive (rgthree)": {"input": {"required": {"prompt_g": ["STRING", {"multiline": true, "dynamicPrompts": true}], "prompt_l": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"opt_model": ["MODEL"], "opt_clip": ["CLIP"], "opt_clip_width": ["INT", {"forceInput": true, "default": 1024.0, "min": 0, "max": 16384}], "opt_clip_height": ["INT", {"forceInput": true, "default": 1024.0, "min": 0, "max": 16384}], "insert_lora": [["CHOOSE", "DISABLE LORAS", "AnimateLCM/AnimateLCM_sd15_t2v_lora", "Detailed_Hands-000001", "Expressive_H-000001", "FLUX.1-Turbo-Alpha", "Flux2TurboComfyv2", "Flux_2-Turbo-LoRA_comfyui", "LineArt", "OilPainting", "PerfectEyesXL", "Qwen-Edit-2509-Multiple-angles", "Qwen-Image-Edit-2509-Anything2RealAlpha", "Qwen-Image-Edit-2509-Fusion", "Qwen-Image-Edit-2509-Light-Migration", "Qwen-Image-Edit-2509-Relight", "Qwen-Image-Edit-2509-White_to_Scene", "Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16", "Qwen-Image-Lightning-4steps-V1.0", "SDXL_StopMotion_lora", "SU_Twrk_EP62", "Samsung_UltraReal", "UltraRealPhoto", "WAN_dr34mj0b", "Wan21_CausVid_14B_T2V_lora_rank32", "Wan21_CausVid_bidirect2_T2V_1_3B_lora_rank32", "add-detail-xl", "add_detail", "aidmaRealisticSkin-FLUX-v0.1", "amateurphoto-v6-forcu", "bounceV_01", "chronoedit_distill_lora", "comfyui_portrait_lora64", "comfyui_subject_lora16", "detailz-wan", "diffusion_pytorch_model", "flux.1-fill-dev-object-removal-lora", "flux/In-Context-LoRA/couple-profile", "flux/In-Context-LoRA/film-storyboard", "flux/In-Context-LoRA/font-design", "flux/In-Context-LoRA/home-decoration", "flux/In-Context-LoRA/portrait-illustration", "flux/In-Context-LoRA/portrait-photography", "flux/In-Context-LoRA/ppt-templates", "flux/In-Context-LoRA/sandstorm-visual-effect", 
"flux/In-Context-LoRA/sparklers-visual-effect", "flux/In-Context-LoRA/visual-identity-design", "flux/UltraRealPhoto", "flux/anime_lora", "flux/anime_lora_comfy_converted", "flux/art_lora", "flux/art_lora_comfy_converted", "flux/disney_lora", "flux/disney_lora_comfy_converted", "flux/flux.1-turbo-alpha/diffusion_pytorch_model", "flux/furry_lora", "flux/mjv6_lora", "flux/mjv6_lora_comfy_converted", "flux/realism_lora", "flux/realism_lora_comfy_converted", "flux/scenery_lora", "flux/scenery_lora_comfy_converted", "flux1-canny-dev-lora", "flux1-depth-dev-lora", "fluxRealSkin-V2", "flux_realism_lora", "gemma-3-12b-it-abliterated_heretic_lora_rank64_bf16", "gemma-3-12b-it-abliterated_lora_rank64_bf16", "hunyuan/hyvideo_FastVideo_LoRA-fp8", "hunyuanvideo1.5_t2v_480p_lightx2v_4step_lora_rank_32_bf16", "insert-anything_extracted_lora_rank_64-bf16", "kontext/Akira Toriyama - Dragon Ball Style - Kontext", "kontext/FlatAnimation\u3010style\u3011_flux_kontext_lora_v1-PAseer", "kontext/GuoHuaKontextLoRA", "kontext/HDR360", "kontext/HXHY-RealisticKontextLoRA1.6", "kontext/InScene-v1.0", "kontext/Three-view sketch", "kontext/fluffy-kontext-lora", "kontext/ghibliStyle_kontext_byJaneB", "kontext/glass-prism-kontext-lora", "kontext/kontext-pixelize-v1.1_000001500", "kontext/kontext_change_clothes_t1", "kontext/mech-anything-kontext", "kontext/unflux_v1", "lcm/sd1/pytorch_lora_weights", "lcm/sdxl/pytorch_lora_weights", "ltx2-squish", "more_details", "qwen_image_union_diffsynth_lora", "ral-dissolve-sdxl", "refcontrol_pose", "sd1/3232pixel", "sd1/AoTStyle", "sd1/AxelStone-DocStasis", "sd1/DrippingArt", "sd1/GTA_Style", "sd1/LiquidAF-0-1", "sd1/Longan", "sd1/Magazine-10", "sd1/Naruto", "sd1/add_detail", "sd1/anime_minimalist_v1-000020", "sd1/animemix_v3_offset", "sd1/animeoutlineV4_16", "sd1/arcane_offset", "sd1/concept_master_1.0", "sd1/epi_noiseoffset2", "sd1/ghibli_style_offset", "sd1/ip-adapter-faceid-plus_sd15_lora", "sd1/ip-adapter-faceid-plusv2_sd15_lora", 
"sd1/ip-adapter-faceid_sd15_lora", "sd1/modernlogo", "sd1/more_details", "sd1/neg4all_bdsqlsz_V3.5", "sd1/sd15_lcm_lora", "sd1/tangbohu-line_1.0", "sd1/v3_sd15_adapter", "sdxl/3DRedmond-3DRenderStyle-3DRenderAF", "sdxl/Aether_Fire_v1_SDXL_LoRA", "sdxl/Aether_Ghost_v1.1_LoRA", "sdxl/Aether_Ghost_v1_LoRA", "sdxl/ClayAnimationRedm", "sdxl/DreamARTSDXL", "sdxl/MJ52", "sdxl/PS1Redmond-PS1Game-Playstation1Graphics", "sdxl/ParchartXL-2.0", "sdxl/PixelArtRedmond-Lite64", "sdxl/StickersRedmond", "sdxl/ToyRedmond-FnkRedmAF", "sdxl/WowifierXL-V2", "sdxl/add-detail-xl", "sdxl/cereal_box_sdxl_v1", "sdxl/crayons_v1_sdxl", "sdxl/fofr/emoji", "sdxl/ikea_instructions_xl_v1_5", "sdxl/ip-adapter-faceid-plusv2_sdxl_lora", "sdxl/ip-adapter-faceid_sdxl_lora", "sdxl/magik_smoke_v1.2", "sdxl/neg4all_xl_bdsqlsz_V5", "sdxl/sdxl_lightning_8step_lora", "sdxl/sdxl_offset_example_v10", "sdxl/wojak_SDXL", "sdxl/xl_more_art-full_v1", "uso-flux1-dit-lora-v1", "wan2.1-1.3b-control-lora-tile-v1.0_comfy", "wan2.2_animate_14B_relight_lora_bf16", "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise", "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise", "wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise", "wan_alpha_2.1_rgba_lora", "z_image_turbo_distill_patch_lora_bf16"]], "insert_embedding": [["CHOOSE", "AS-YoungV2-neg", "AS-YoungV2", "BadDream", "By bad artist -neg", "ERA09NEGV2", "EasyNegativeV2", "FastNegativeV2", "GS-DeFeminize-neg", "GS-DeMasculate-neg", "GS-Girlish", "Style-GravityMagic", "bad-hands-5", "bad-picture-chill-75v", "badhandv4", "badpic", "easynegative", "epiCNegative", "negative_hand-neg", "ng_deepnegative_v1_75t", "nobg", "prettify", "prettyeyes", "style-rustmagic-neg", "style-rustmagic", "verybadimagenegative_v1.3"]], "insert_saved": [["CHOOSE"]], "target_width": ["INT", {"default": -1, "min": -1, "max": 16384}], "target_height": ["INT", {"default": -1, "min": -1, "max": 16384}], "crop_width": ["INT", {"default": -1, "min": -1, "max": 16384}], 
"crop_height": ["INT", {"default": -1, "min": -1, "max": 16384}]}, "hidden": {"values_insert_saved": [["CHOOSE"]]}}, "input_order": {"required": ["prompt_g", "prompt_l"], "optional": ["opt_model", "opt_clip", "opt_clip_width", "opt_clip_height", "insert_lora", "insert_embedding", "insert_saved", "target_width", "target_height", "crop_width", "crop_height"], "hidden": ["values_insert_saved"]}, "is_input_list": false, "output": ["CONDITIONING", "MODEL", "CLIP", "STRING", "STRING"], "output_is_list": [false, false, false, false, false], "output_name": ["CONDITIONING", "MODEL", "CLIP", "TEXT_G", "TEXT_L"], "name": "SDXL Power Prompt - Positive (rgthree)", "display_name": "SDXL Power Prompt - Positive (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "SDXL Power Prompt - Simple / Negative (rgthree)": {"input": {"required": {"prompt_g": ["STRING", {"multiline": true, "dynamicPrompts": true}], "prompt_l": ["STRING", {"multiline": true, "dynamicPrompts": true}]}, "optional": {"opt_clip": ["CLIP"], "opt_clip_width": ["INT", {"forceInput": true, "default": 1024.0, "min": 0, "max": 16384}], "opt_clip_height": ["INT", {"forceInput": true, "default": 1024.0, "min": 0, "max": 16384}], "insert_embedding": [["CHOOSE", "AS-YoungV2-neg", "AS-YoungV2", "BadDream", "By bad artist -neg", "ERA09NEGV2", "EasyNegativeV2", "FastNegativeV2", "GS-DeFeminize-neg", "GS-DeMasculate-neg", "GS-Girlish", "Style-GravityMagic", "bad-hands-5", "bad-picture-chill-75v", "badhandv4", "badpic", "easynegative", "epiCNegative", "negative_hand-neg", "ng_deepnegative_v1_75t", "nobg", "prettify", "prettyeyes", "style-rustmagic-neg", "style-rustmagic", "verybadimagenegative_v1.3"]], "insert_saved": [["CHOOSE"]], "target_width": ["INT", {"default": -1, "min": -1, "max": 16384}], "target_height": ["INT", {"default": -1, "min": -1, "max": 16384}], "crop_width": ["INT", {"default": -1, 
"min": -1, "max": 16384}], "crop_height": ["INT", {"default": -1, "min": -1, "max": 16384}]}, "hidden": {"values_insert_saved": [["CHOOSE"]]}}, "input_order": {"required": ["prompt_g", "prompt_l"], "optional": ["opt_clip", "opt_clip_width", "opt_clip_height", "insert_embedding", "insert_saved", "target_width", "target_height", "crop_width", "crop_height"], "hidden": ["values_insert_saved"]}, "is_input_list": false, "output": ["CONDITIONING", "STRING", "STRING"], "output_is_list": [false, false, false], "output_name": ["CONDITIONING", "TEXT_G", "TEXT_L"], "name": "SDXL Power Prompt - Simple / Negative (rgthree)", "display_name": "SDXL Power Prompt - Simple / Negative (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Any Switch (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "Any Switch (rgthree)", "display_name": "Any Switch (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Image Comparer (rgthree)": {"input": {"required": {}, "optional": {"image_a": ["IMAGE"], "image_b": ["IMAGE"]}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}}, "input_order": {"required": [], "optional": ["image_a", "image_b"], "hidden": ["prompt", "extra_pnginfo"]}, "is_input_list": false, "output": [], "output_is_list": [], "output_name": [], "name": "Image Comparer (rgthree)", "display_name": "Image Comparer (rgthree)", "description": "Saves the input images to your ComfyUI output directory.", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": true, "has_intermediate_output": false, "search_aliases": ["preview", "preview image", 
"show image", "view image", "display image", "image viewer"], "essentials_category": "Basics"}, "Power Lora Loader (rgthree)": {"input": {"required": {}, "optional": {"model": ["MODEL"], "clip": ["CLIP"]}, "hidden": {}}, "input_order": {"required": [], "optional": ["model", "clip"], "hidden": []}, "is_input_list": false, "output": ["MODEL", "CLIP"], "output_is_list": [false, false], "output_name": ["MODEL", "CLIP"], "name": "Power Lora Loader (rgthree)", "display_name": "Power Lora Loader (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Power Primitive (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "Power Primitive (rgthree)", "display_name": "Power Primitive (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Image or Latent Size (rgthree)": {"input": {"required": {}, "optional": {}}, "input_order": {"required": [], "optional": []}, "is_input_list": false, "output": ["INT", "INT"], "output_is_list": [false, false], "output_name": ["WIDTH", "HEIGHT"], "name": "Image or Latent Size (rgthree)", "display_name": "Image or Latent Size (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Image Resize (rgthree)": {"input": {"required": {"image": ["IMAGE"], "measurement": [["pixels", "percentage"]], "width": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1, "tooltip": "The width of the desired resize. A pixel value if measurement is 'pixels' or a 100% scale percentage value if measurement is 'percentage'. 
Passing '0' will calculate the dimension based on the height."}], "height": ["INT", {"default": 0, "min": 0, "max": 16384, "step": 1}], "fit": [["crop", "pad", "contain"], {"tooltip": "'crop' resizes so the image covers the desired width and height, and center-crops the excess, returning exactly the desired width and height.\n'pad' resizes so the image fits inside the desired width and height, and fills the empty space returning exactly the desired width and height.\n'contain' resizes so the image fits inside the desired width and height, and returns the image with its new size, with one side likely smaller than the desired.\n\nNote, if either width or height is '0', the effective fit is 'contain'."}], "method": [["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]]}}, "input_order": {"required": ["image", "measurement", "width", "height", "fit", "method"]}, "is_input_list": false, "output": ["IMAGE", "INT", "INT"], "output_is_list": [false, false, false], "output_name": ["IMAGE", "WIDTH", "HEIGHT"], "name": "Image Resize (rgthree)", "display_name": "Image Resize (rgthree)", "description": "Resize the image.", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}, "Power Puter (rgthree)": {"input": {"required": {}, "optional": {}, "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO", "prompt": "PROMPT"}}, "input_order": {"required": [], "optional": [], "hidden": ["unique_id", "extra_pnginfo", "prompt"]}, "is_input_list": false, "output": ["*"], "output_is_list": [false], "output_name": ["*"], "name": "Power Puter (rgthree)", "display_name": "Power Puter (rgthree)", "description": "", "python_module": "custom_nodes.rgthree-comfy", "category": "rgthree", "output_node": false, "has_intermediate_output": false, "search_aliases": []}}