pipeline_powerpaint.py 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242
  1. # Copyright 2023 The HuggingFace Team. All rights reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import inspect
  15. from typing import Any, Callable, Dict, List, Optional, Union
  16. import numpy as np
  17. import PIL
  18. import torch
  19. from diffusers.configuration_utils import FrozenDict
  20. from diffusers.image_processor import VaeImageProcessor
  21. from diffusers.loaders import (
  22. FromSingleFileMixin,
  23. LoraLoaderMixin,
  24. TextualInversionLoaderMixin,
  25. )
  26. from diffusers.models import (
  27. AsymmetricAutoencoderKL,
  28. AutoencoderKL,
  29. UNet2DConditionModel,
  30. )
  31. from diffusers.pipelines.pipeline_utils import DiffusionPipeline
  32. from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
  33. from diffusers.pipelines.stable_diffusion.safety_checker import (
  34. StableDiffusionSafetyChecker,
  35. )
  36. from diffusers.schedulers import KarrasDiffusionSchedulers
  37. from diffusers.utils import (
  38. deprecate,
  39. is_accelerate_available,
  40. is_accelerate_version,
  41. logging,
  42. )
  43. from diffusers.utils.torch_utils import randn_tensor
  44. from packaging import version
  45. from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
# Module-scoped logger shared by the helper function and pipeline below.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
  47. def prepare_mask_and_masked_image(
  48. image, mask, height, width, return_image: bool = False
  49. ):
  50. """
  51. Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
  52. converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
  53. ``image`` and ``1`` for the ``mask``.
  54. The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
  55. binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
  56. Args:
  57. image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
  58. It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
  59. ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
  60. mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
  61. It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
  62. ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
  63. Raises:
  64. ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
  65. should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
  66. TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
  67. (ot the other way around).
  68. Returns:
  69. tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
  70. dimensions: ``batch x channels x height x width``.
  71. """
  72. if image is None:
  73. raise ValueError("`image` input cannot be undefined.")
  74. if mask is None:
  75. raise ValueError("`mask_image` input cannot be undefined.")
  76. if isinstance(image, torch.Tensor):
  77. if not isinstance(mask, torch.Tensor):
  78. raise TypeError(
  79. f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not"
  80. )
  81. # Batch single image
  82. if image.ndim == 3:
  83. assert (
  84. image.shape[0] == 3
  85. ), "Image outside a batch should be of shape (3, H, W)"
  86. image = image.unsqueeze(0)
  87. # Batch and add channel dim for single mask
  88. if mask.ndim == 2:
  89. mask = mask.unsqueeze(0).unsqueeze(0)
  90. # Batch single mask or add channel dim
  91. if mask.ndim == 3:
  92. # Single batched mask, no channel dim or single mask not batched but channel dim
  93. if mask.shape[0] == 1:
  94. mask = mask.unsqueeze(0)
  95. # Batched masks no channel dim
  96. else:
  97. mask = mask.unsqueeze(1)
  98. assert (
  99. image.ndim == 4 and mask.ndim == 4
  100. ), "Image and Mask must have 4 dimensions"
  101. assert (
  102. image.shape[-2:] == mask.shape[-2:]
  103. ), "Image and Mask must have the same spatial dimensions"
  104. assert (
  105. image.shape[0] == mask.shape[0]
  106. ), "Image and Mask must have the same batch size"
  107. # Check image is in [-1, 1]
  108. if image.min() < -1 or image.max() > 1:
  109. raise ValueError("Image should be in [-1, 1] range")
  110. # Check mask is in [0, 1]
  111. if mask.min() < 0 or mask.max() > 1:
  112. raise ValueError("Mask should be in [0, 1] range")
  113. # Binarize mask
  114. mask[mask < 0.5] = 0
  115. mask[mask >= 0.5] = 1
  116. # Image as float32
  117. image = image.to(dtype=torch.float32)
  118. elif isinstance(mask, torch.Tensor):
  119. raise TypeError(
  120. f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not"
  121. )
  122. else:
  123. # preprocess image
  124. if isinstance(image, (PIL.Image.Image, np.ndarray)):
  125. image = [image]
  126. if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
  127. # resize all images w.r.t passed height an width
  128. image = [
  129. i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image
  130. ]
  131. image = [np.array(i.convert("RGB"))[None, :] for i in image]
  132. image = np.concatenate(image, axis=0)
  133. elif isinstance(image, list) and isinstance(image[0], np.ndarray):
  134. image = np.concatenate([i[None, :] for i in image], axis=0)
  135. image = image.transpose(0, 3, 1, 2)
  136. image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
  137. # preprocess mask
  138. if isinstance(mask, (PIL.Image.Image, np.ndarray)):
  139. mask = [mask]
  140. if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
  141. mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
  142. mask = np.concatenate(
  143. [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0
  144. )
  145. mask = mask.astype(np.float32) / 255.0
  146. elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
  147. mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
  148. mask[mask < 0.5] = 0
  149. mask[mask >= 0.5] = 1
  150. mask = torch.from_numpy(mask)
  151. masked_image = image * (mask < 0.5)
  152. # n.b. ensure backwards compatibility as old function does not return image
  153. if return_image:
  154. return mask, masked_image, image
  155. return mask, masked_image
class StableDiffusionInpaintPipeline(
    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
    r"""
    Pipeline for text-guided image inpainting using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights

    Args:
        vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    # Components that may be None when the pipeline is constructed or loaded
    # (e.g. when the safety checker is deliberately disabled).
    _optional_components = ["safety_checker", "feature_extractor"]
    def __init__(
        self,
        vae: Union[AutoencoderKL, AsymmetricAutoencoderKL],
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        """Register all sub-models and patch known-bad legacy configurations.

        Besides wiring the modules into the pipeline via `register_modules`,
        the constructor rewrites outdated scheduler/unet configs in place
        (emitting deprecation warnings) and validates the safety-checker
        setup.
        """
        super().__init__()
        # Legacy checkpoints shipped schedulers with `steps_offset != 1`;
        # patch the frozen config (with a deprecation notice) to the value
        # current samplers expect.
        if (
            hasattr(scheduler.config, "steps_offset")
            and scheduler.config.steps_offset != 1
        ):
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate(
                "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False
            )
            # FrozenDict is immutable, so the private dict is replaced wholesale.
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        # Same treatment for schedulers that predate `skip_prk_steps`.
        if (
            hasattr(scheduler.config, "skip_prk_steps")
            and scheduler.config.skip_prk_steps is False
        ):
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate(
                "skip_prk_steps not set",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        # Running without a safety checker is allowed but loudly discouraged.
        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        # The safety checker consumes features from the feature extractor, so
        # one without the other is a configuration error.
        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )
        # UNets exported before diffusers 0.9.0 may carry a wrong
        # `sample_size` (32 instead of 64); detect and fix that combination.
        is_unet_version_less_0_9_0 = hasattr(
            unet.config, "_diffusers_version"
        ) and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse(
            "0.9.0.dev0"
        )
        is_unet_sample_size_less_64 = (
            hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate(
                "sample_size<64", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)
        # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
        # (i.e. an inpainting UNet is expected to take 9 input channels).
        if unet.config.in_channels != 9:
            # NOTE(review): this log message is truncated ("... which.") — it
            # should spell out the consequence of a non-9-channel UNet.
            logger.info(
                f"You have loaded a UNet with {unet.config.in_channels} input channels which."
            )
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        # Spatial downscale factor between pixel space and the VAE latent space.
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)
  297. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
  298. def enable_model_cpu_offload(self, gpu_id=0):
  299. r"""
  300. Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
  301. time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
  302. Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
  303. iterative execution of the `unet`.
  304. """
  305. if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
  306. from accelerate import cpu_offload_with_hook
  307. else:
  308. raise ImportError(
  309. "`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher."
  310. )
  311. device = torch.device(f"cuda:{gpu_id}")
  312. if self.device.type != "cpu":
  313. self.to("cpu", silence_dtype_warnings=True)
  314. torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
  315. hook = None
  316. for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
  317. _, hook = cpu_offload_with_hook(
  318. cpu_offloaded_model, device, prev_module_hook=hook
  319. )
  320. if self.safety_checker is not None:
  321. _, hook = cpu_offload_with_hook(
  322. self.safety_checker, device, prev_module_hook=hook
  323. )
  324. # We'll offload the last model manually.
  325. self.final_offload_hook = hook
  326. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        promptA,
        promptB,
        t,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_promptA=None,
        negative_promptB=None,
        t_nag=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encode two prompts into text-encoder hidden states and blend them.

        PowerPaint-style variant of the stock `_encode_prompt`: both `promptA` and
        `promptB` are encoded with the CLIP text encoder and linearly interpolated
        as ``t * emb(promptA) + (1 - t) * emb(promptB)``; the negative prompts are
        blended the same way with weight `t_nag`.

        Args:
            promptA (`str` or `List[str]`): primary prompt(s) to encode.
            promptB (`str` or `List[str]`): secondary prompt(s) blended in.
            t: blend weight applied to `promptA`'s embedding (must broadcast
                against the embedding tensor; presumably in [0, 1] — TODO confirm
                at the call sites).
            device (`torch.device`): device the embeddings are placed on.
            num_images_per_prompt (`int`): number of images generated per prompt.
            do_classifier_free_guidance (`bool`): whether to also build the
                unconditional embeddings and concatenate them in front.
            negative_promptA / negative_promptB (*optional*): negative prompt
                pair, blended with weight `t_nag`.
            t_nag: blend weight for the negative-prompt pair.
            prompt_embeds (`torch.FloatTensor`, *optional*): pre-generated prompt
                embeddings; when given, tokenization/encoding/blending is skipped.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*): pre-generated
                negative embeddings.
            lora_scale (`float`, *optional*): LoRA scale applied to all LoRA
                layers of the text encoder, if loaded.

        Returns:
            `torch.FloatTensor` of shape ``(batch * num_images_per_prompt, seq_len,
            dim)``; with classifier-free guidance the unconditional embeddings are
            concatenated in front along the batch dimension.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale
        # `promptA` drives batch-size inference and the type validation below.
        prompt = promptA
        negative_prompt = negative_promptA
        if promptA is not None and isinstance(promptA, str):
            batch_size = 1
        elif promptA is not None and isinstance(promptA, list):
            batch_size = len(promptA)
        else:
            batch_size = prompt_embeds.shape[0]
        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary.
            # NOTE(review): only `promptA` is converted here — `promptB` is
            # tokenized as-is; confirm whether that is intentional.
            if isinstance(self, TextualInversionLoaderMixin):
                promptA = self.maybe_convert_prompt(promptA, self.tokenizer)
            text_inputsA = self.tokenizer(
                promptA,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputsB = self.tokenizer(
                promptB,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_idsA = text_inputsA.input_ids
            text_input_idsB = text_inputsB.input_ids
            # Warn when promptA had to be truncated to CLIP's max length
            # (promptB is not checked here).
            untruncated_ids = self.tokenizer(
                promptA, padding="longest", return_tensors="pt"
            ).input_ids
            if untruncated_ids.shape[-1] >= text_input_idsA.shape[
                -1
            ] and not torch.equal(text_input_idsA, untruncated_ids):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )
            if (
                hasattr(self.text_encoder.config, "use_attention_mask")
                and self.text_encoder.config.use_attention_mask
            ):
                # Both encoder calls below reuse promptA's attention mask.
                attention_mask = text_inputsA.attention_mask.to(device)
            else:
                attention_mask = None
            prompt_embedsA = self.text_encoder(
                text_input_idsA.to(device),
                attention_mask=attention_mask,
            )
            prompt_embedsA = prompt_embedsA[0]
            prompt_embedsB = self.text_encoder(
                text_input_idsB.to(device),
                attention_mask=attention_mask,
            )
            prompt_embedsB = prompt_embedsB[0]
            # Linear interpolation between the two prompt embeddings.
            prompt_embeds = prompt_embedsA * (t) + (1 - t) * prompt_embedsB
        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype
        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(
            bs_embed * num_images_per_prompt, seq_len, -1
        )
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokensA: List[str]
            uncond_tokensB: List[str]
            if negative_prompt is None:
                uncond_tokensA = [""] * batch_size
                uncond_tokensB = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokensA = [negative_promptA]
                uncond_tokensB = [negative_promptB]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokensA = negative_promptA
                uncond_tokensB = negative_promptB
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokensA = self.maybe_convert_prompt(
                    uncond_tokensA, self.tokenizer
                )
                uncond_tokensB = self.maybe_convert_prompt(
                    uncond_tokensB, self.tokenizer
                )
            max_length = prompt_embeds.shape[1]
            uncond_inputA = self.tokenizer(
                uncond_tokensA,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_inputB = self.tokenizer(
                uncond_tokensB,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            if (
                hasattr(self.text_encoder.config, "use_attention_mask")
                and self.text_encoder.config.use_attention_mask
            ):
                attention_mask = uncond_inputA.attention_mask.to(device)
            else:
                attention_mask = None
            negative_prompt_embedsA = self.text_encoder(
                uncond_inputA.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embedsB = self.text_encoder(
                uncond_inputB.input_ids.to(device),
                attention_mask=attention_mask,
            )
            # Blend the negative embeddings with their own weight `t_nag`.
            negative_prompt_embeds = (
                negative_prompt_embedsA[0] * (t_nag)
                + (1 - t_nag) * negative_prompt_embedsB[0]
            )
        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(
                dtype=prompt_embeds_dtype, device=device
            )
            negative_prompt_embeds = negative_prompt_embeds.repeat(
                1, num_images_per_prompt, 1
            )
            negative_prompt_embeds = negative_prompt_embeds.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
  532. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
  533. def run_safety_checker(self, image, device, dtype):
  534. if self.safety_checker is None:
  535. has_nsfw_concept = None
  536. else:
  537. if torch.is_tensor(image):
  538. feature_extractor_input = self.image_processor.postprocess(
  539. image, output_type="pil"
  540. )
  541. else:
  542. feature_extractor_input = self.image_processor.numpy_to_pil(image)
  543. safety_checker_input = self.feature_extractor(
  544. feature_extractor_input, return_tensors="pt"
  545. ).to(device)
  546. image, has_nsfw_concept = self.safety_checker(
  547. images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
  548. )
  549. return image, has_nsfw_concept
  550. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
  551. def prepare_extra_step_kwargs(self, generator, eta):
  552. # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
  553. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
  554. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
  555. # and should be between [0, 1]
  556. accepts_eta = "eta" in set(
  557. inspect.signature(self.scheduler.step).parameters.keys()
  558. )
  559. extra_step_kwargs = {}
  560. if accepts_eta:
  561. extra_step_kwargs["eta"] = eta
  562. # check if the scheduler accepts generator
  563. accepts_generator = "generator" in set(
  564. inspect.signature(self.scheduler.step).parameters.keys()
  565. )
  566. if accepts_generator:
  567. extra_step_kwargs["generator"] = generator
  568. return extra_step_kwargs
  569. def check_inputs(
  570. self,
  571. prompt,
  572. height,
  573. width,
  574. strength,
  575. callback_steps,
  576. negative_prompt=None,
  577. prompt_embeds=None,
  578. negative_prompt_embeds=None,
  579. ):
  580. if strength < 0 or strength > 1:
  581. raise ValueError(
  582. f"The value of strength should in [0.0, 1.0] but is {strength}"
  583. )
  584. if height % 8 != 0 or width % 8 != 0:
  585. raise ValueError(
  586. f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
  587. )
  588. if (callback_steps is None) or (
  589. callback_steps is not None
  590. and (not isinstance(callback_steps, int) or callback_steps <= 0)
  591. ):
  592. raise ValueError(
  593. f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
  594. f" {type(callback_steps)}."
  595. )
  596. if prompt is not None and prompt_embeds is not None:
  597. raise ValueError(
  598. f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
  599. " only forward one of the two."
  600. )
  601. elif prompt is None and prompt_embeds is None:
  602. raise ValueError(
  603. "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
  604. )
  605. elif prompt is not None and (
  606. not isinstance(prompt, str) and not isinstance(prompt, list)
  607. ):
  608. raise ValueError(
  609. f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
  610. )
  611. if negative_prompt is not None and negative_prompt_embeds is not None:
  612. raise ValueError(
  613. f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
  614. f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
  615. )
  616. if prompt_embeds is not None and negative_prompt_embeds is not None:
  617. if prompt_embeds.shape != negative_prompt_embeds.shape:
  618. raise ValueError(
  619. "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
  620. f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
  621. f" {negative_prompt_embeds.shape}."
  622. )
  623. def prepare_latents(
  624. self,
  625. batch_size,
  626. num_channels_latents,
  627. height,
  628. width,
  629. dtype,
  630. device,
  631. generator,
  632. latents=None,
  633. image=None,
  634. timestep=None,
  635. is_strength_max=True,
  636. return_noise=False,
  637. return_image_latents=False,
  638. ):
  639. shape = (
  640. batch_size,
  641. num_channels_latents,
  642. height // self.vae_scale_factor,
  643. width // self.vae_scale_factor,
  644. )
  645. if isinstance(generator, list) and len(generator) != batch_size:
  646. raise ValueError(
  647. f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
  648. f" size of {batch_size}. Make sure the batch size matches the length of the generators."
  649. )
  650. if (image is None or timestep is None) and not is_strength_max:
  651. raise ValueError(
  652. "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
  653. "However, either the image or the noise timestep has not been provided."
  654. )
  655. if return_image_latents or (latents is None and not is_strength_max):
  656. image = image.to(device=device, dtype=dtype)
  657. image_latents = self._encode_vae_image(image=image, generator=generator)
  658. if latents is None:
  659. noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
  660. # if strength is 1. then initialise the latents to noise, else initial to image + noise
  661. latents = (
  662. noise
  663. if is_strength_max
  664. else self.scheduler.add_noise(image_latents, noise, timestep)
  665. )
  666. # if pure noise then scale the initial latents by the Scheduler's init sigma
  667. latents = (
  668. latents * self.scheduler.init_noise_sigma
  669. if is_strength_max
  670. else latents
  671. )
  672. else:
  673. noise = latents.to(device)
  674. latents = noise * self.scheduler.init_noise_sigma
  675. outputs = (latents,)
  676. if return_noise:
  677. outputs += (noise,)
  678. if return_image_latents:
  679. outputs += (image_latents,)
  680. return outputs
  681. def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
  682. if isinstance(generator, list):
  683. image_latents = [
  684. self.vae.encode(image[i : i + 1]).latent_dist.sample(
  685. generator=generator[i]
  686. )
  687. for i in range(image.shape[0])
  688. ]
  689. image_latents = torch.cat(image_latents, dim=0)
  690. else:
  691. image_latents = self.vae.encode(image).latent_dist.sample(
  692. generator=generator
  693. )
  694. image_latents = self.vae.config.scaling_factor * image_latents
  695. return image_latents
  696. def prepare_mask_latents(
  697. self,
  698. mask,
  699. masked_image,
  700. batch_size,
  701. height,
  702. width,
  703. dtype,
  704. device,
  705. generator,
  706. do_classifier_free_guidance,
  707. ):
  708. # resize the mask to latents shape as we concatenate the mask to the latents
  709. # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
  710. # and half precision
  711. mask = torch.nn.functional.interpolate(
  712. mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
  713. )
  714. mask = mask.to(device=device, dtype=dtype)
  715. masked_image = masked_image.to(device=device, dtype=dtype)
  716. masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
  717. # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
  718. if mask.shape[0] < batch_size:
  719. if not batch_size % mask.shape[0] == 0:
  720. raise ValueError(
  721. "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
  722. f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
  723. " of masks that you pass is divisible by the total requested batch size."
  724. )
  725. mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
  726. if masked_image_latents.shape[0] < batch_size:
  727. if not batch_size % masked_image_latents.shape[0] == 0:
  728. raise ValueError(
  729. "The passed images and the required batch size don't match. Images are supposed to be duplicated"
  730. f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
  731. " Make sure the number of images that you pass is divisible by the total requested batch size."
  732. )
  733. masked_image_latents = masked_image_latents.repeat(
  734. batch_size // masked_image_latents.shape[0], 1, 1, 1
  735. )
  736. mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
  737. masked_image_latents = (
  738. torch.cat([masked_image_latents] * 2)
  739. if do_classifier_free_guidance
  740. else masked_image_latents
  741. )
  742. # aligning device to prevent device errors when concating it with the latent model input
  743. masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
  744. return mask, masked_image_latents
  745. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
  746. def get_timesteps(self, num_inference_steps, strength, device):
  747. # get the original timestep using init_timestep
  748. init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
  749. t_start = max(num_inference_steps - init_timestep, 0)
  750. timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
  751. return timesteps, num_inference_steps - t_start
@torch.no_grad()
def __call__(
    self,
    promptA: Union[str, List[str]] = None,
    promptB: Union[str, List[str]] = None,
    image: Union[torch.FloatTensor, PIL.Image.Image] = None,
    mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    strength: float = 1.0,
    tradoff: float = 1.0,
    tradoff_nag: float = 1.0,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    negative_promptA: Optional[Union[str, List[str]]] = None,
    negative_promptB: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    eta: float = 0.0,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: int = 1,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    task_class: Union[torch.Tensor, float, int] = None,
):
    r"""
    The call function to the pipeline for generation.

    PowerPaint-specific arguments (not in the stock inpaint pipeline):
    `promptA`/`promptB` are the two prompts blended by `tradoff` inside
    `_encode_prompt` (`promptA` also serves as the `prompt` used for input
    validation and batch-size inference); `negative_promptA`/`negative_promptB`
    are blended with `tradoff_nag` the same way; `task_class`, when provided,
    is forwarded as an extra keyword argument to the UNet.

    Args:
        prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
        image (`PIL.Image.Image`):
            `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked
            out with `mask_image` and repainted according to `prompt`).
        mask_image (`PIL.Image.Image`):
            `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted
            while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel
            (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
            expected shape would be `(B, H, W, 1)`.
        height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
            The height in pixels of the generated image.
        width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
            The width in pixels of the generated image.
        strength (`float`, *optional*, defaults to 1.0):
            Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
            starting point and more noise is added the higher the `strength`. The number of denoising steps depends
            on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
            process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
            essentially ignores `image`.
        num_inference_steps (`int`, *optional*, defaults to 50):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference. This parameter is modulated by `strength`.
        guidance_scale (`float`, *optional*, defaults to 7.5):
            A higher guidance scale value encourages the model to generate images closely linked to the text
            `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide what to not include in image generation. If not defined, you need to
            pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
        num_images_per_prompt (`int`, *optional*, defaults to 1):
            The number of images to generate per prompt.
        eta (`float`, *optional*, defaults to 0.0):
            Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
            to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        latents (`torch.FloatTensor`, *optional*):
            Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
            generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
            tensor is generated by sampling using the supplied random `generator`.
        prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `prompt` input argument.
        negative_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
            not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
        output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
            plain tuple.
        callback (`Callable`, *optional*):
            A function that calls every `callback_steps` steps during inference. NOTE(review): this pipeline
            invokes it as `callback(self, i, t, {})`, which differs from the standard diffusers
            `callback(step, timestep, latents)` signature — confirm what callers expect.
        callback_steps (`int`, *optional*, defaults to 1):
            The frequency at which the `callback` function is called. If not specified, the callback is called at
            every step.
        cross_attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
            [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
    Examples:
    ```py
    >>> import PIL
    >>> import requests
    >>> import torch
    >>> from io import BytesIO
    >>> from diffusers import StableDiffusionInpaintPipeline
    >>> def download_image(url):
    ...     response = requests.get(url)
    ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
    >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
    >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
    >>> init_image = download_image(img_url).resize((512, 512))
    >>> mask_image = download_image(mask_url).resize((512, 512))
    >>> pipe = StableDiffusionInpaintPipeline.from_pretrained(
    ...     "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
    ... )
    >>> pipe = pipe.to("cuda")
    >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
    >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
    ```
    Returns:
        [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
            otherwise a `tuple` is returned where the first element is a list with the generated images and the
            second element is a list of `bool`s indicating whether the corresponding generated image contains
            "not-safe-for-work" (nsfw) content.
    """
    # 0. Default height and width to unet
    height = height or self.unet.config.sample_size * self.vae_scale_factor
    width = width or self.unet.config.sample_size * self.vae_scale_factor
    # Validation and batch-size inference run on the "A" prompt pair only;
    # promptB participates later via _encode_prompt's tradoff blending.
    prompt = promptA
    negative_prompt = negative_promptA
    # 1. Check inputs
    self.check_inputs(
        prompt,
        height,
        width,
        strength,
        callback_steps,
        negative_prompt,
        prompt_embeds,
        negative_prompt_embeds,
    )
    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        # check_inputs guarantees prompt_embeds is set when prompt is None.
        batch_size = prompt_embeds.shape[0]
    device = self._execution_device
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0
    # 3. Encode input prompt
    text_encoder_lora_scale = (
        cross_attention_kwargs.get("scale", None)
        if cross_attention_kwargs is not None
        else None
    )
    # PowerPaint variant: both prompt pairs plus their tradoff weights are
    # handed to _encode_prompt (signature differs from stock diffusers).
    prompt_embeds = self._encode_prompt(
        promptA,
        promptB,
        tradoff,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_promptA,
        negative_promptB,
        tradoff_nag,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        lora_scale=text_encoder_lora_scale,
    )
    # 4. set timesteps
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps, num_inference_steps = self.get_timesteps(
        num_inference_steps=num_inference_steps, strength=strength, device=device
    )
    # check that number of inference steps is not < 1 - as this doesn't make sense
    if num_inference_steps < 1:
        raise ValueError(
            f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
            f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
        )
    # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
    latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
    # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
    is_strength_max = strength == 1.0
    # 5. Preprocess mask and image
    mask, masked_image, init_image = prepare_mask_and_masked_image(
        image, mask_image, height, width, return_image=True
    )
    # Keep an unmodified copy of the mask for the asymmetric-VAE decode path.
    mask_condition = mask.clone()
    # 6. Prepare latent variables
    num_channels_latents = self.vae.config.latent_channels
    num_channels_unet = self.unet.config.in_channels
    # A 4-channel UNet has no mask/masked-image conditioning channels, so the
    # original image latents are needed for per-step blending in the loop.
    return_image_latents = num_channels_unet == 4
    latents_outputs = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
        image=init_image,
        timestep=latent_timestep,
        is_strength_max=is_strength_max,
        return_noise=True,
        return_image_latents=return_image_latents,
    )
    if return_image_latents:
        latents, noise, image_latents = latents_outputs
    else:
        latents, noise = latents_outputs
    # 7. Prepare mask latent variables
    mask, masked_image_latents = self.prepare_mask_latents(
        mask,
        masked_image,
        batch_size * num_images_per_prompt,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        do_classifier_free_guidance,
    )
    # 8. Check that sizes of mask, masked image and latents match
    if num_channels_unet == 9:
        # default case for runwayml/stable-diffusion-inpainting
        num_channels_mask = mask.shape[1]
        num_channels_masked_image = masked_image_latents.shape[1]
        if (
            num_channels_latents + num_channels_mask + num_channels_masked_image
            != self.unet.config.in_channels
        ):
            raise ValueError(
                f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
                f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
                f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
                " `pipeline.unet` or your `mask_image` or `image` input."
            )
    elif num_channels_unet != 4:
        raise ValueError(
            f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
        )
    # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
    extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
    # 10. Denoising loop
    num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = (
                torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            )
            # concat latents, mask, masked_image_latents in the channel dimension
            latent_model_input = self.scheduler.scale_model_input(
                latent_model_input, t
            )
            if num_channels_unet == 9:
                latent_model_input = torch.cat(
                    [latent_model_input, mask, masked_image_latents], dim=1
                )
            # predict the noise residual
            if task_class is not None:
                # PowerPaint task conditioning: only forwarded when given,
                # so the UNet must accept a `task_class` kwarg in this case.
                noise_pred = self.unet(
                    sample=latent_model_input,
                    timestep=t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                    task_class=task_class,
                )[0]
            else:
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (
                    noise_pred_text - noise_pred_uncond
                )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, **extra_step_kwargs, return_dict=False
            )[0]
            if num_channels_unet == 4:
                # 4-channel UNet: blend the known (unmasked) regions back in
                # from the original-image latents, re-noised to the next step.
                # `image_latents` and this branch share the same condition, so
                # the name is guaranteed to be bound here.
                init_latents_proper = image_latents[:1]
                init_mask = mask[:1]
                if i < len(timesteps) - 1:
                    noise_timestep = timesteps[i + 1]
                    init_latents_proper = self.scheduler.add_noise(
                        init_latents_proper, noise, torch.tensor([noise_timestep])
                    )
                latents = (
                    1 - init_mask
                ) * init_latents_proper + init_mask * latents
            # call the callback, if provided
            if i == len(timesteps) - 1 or (
                (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
            ):
                progress_bar.update()
                if callback is not None and i % callback_steps == 0:
                    # NOTE(review): non-standard callback signature — stock
                    # diffusers pipelines call callback(i, t, latents); here
                    # callers receive (pipeline, i, t, {}) instead. Confirm.
                    callback(self, i, t, {})
    if not output_type == "latent":
        condition_kwargs = {}
        if isinstance(self.vae, AsymmetricAutoencoderKL):
            # The asymmetric VAE decoder additionally conditions on the
            # original image and mask.
            init_image = init_image.to(
                device=device, dtype=masked_image_latents.dtype
            )
            init_image_condition = init_image.clone()
            init_image = self._encode_vae_image(init_image, generator=generator)
            mask_condition = mask_condition.to(
                device=device, dtype=masked_image_latents.dtype
            )
            condition_kwargs = {
                "image": init_image_condition,
                "mask": mask_condition,
            }
        image = self.vae.decode(
            latents / self.vae.config.scaling_factor,
            return_dict=False,
            **condition_kwargs,
        )[0]
        image, has_nsfw_concept = self.run_safety_checker(
            image, device, prompt_embeds.dtype
        )
    else:
        image = latents
        has_nsfw_concept = None
    # Only denormalize images the safety checker did not flag.
    if has_nsfw_concept is None:
        do_denormalize = [True] * image.shape[0]
    else:
        do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
    image = self.image_processor.postprocess(
        image, output_type=output_type, do_denormalize=do_denormalize
    )
    # Offload last model to CPU
    if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
        self.final_offload_hook.offload()
    if not return_dict:
        return (image, has_nsfw_concept)
    return StableDiffusionPipelineOutput(
        images=image, nsfw_content_detected=has_nsfw_concept
    )