diff --git a/examples/community/README.md b/examples/community/README.md index 8f70e004e5fd..3f92f202d8c4 100644 --- a/examples/community/README.md +++ b/examples/community/README.md @@ -355,7 +355,7 @@ generator = th.Generator("cuda").manual_seed(0) seed = 0 prompt = "a forest | a camel" -weights = " 1 | 1" # Equal weight to each prompt. Cna be negative +weights = " 1 | 1" # Equal weight to each prompt. Can be negative images = [] for i in range(4): diff --git a/examples/community/composable_stable_diffusion.py b/examples/community/composable_stable_diffusion.py index 308c18287e02..10d34d255a20 100644 --- a/examples/community/composable_stable_diffusion.py +++ b/examples/community/composable_stable_diffusion.py @@ -133,7 +133,7 @@ def __call__( tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`. + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. 
@@ -264,7 +264,7 @@ def __call__( self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) - # if we use LMSDiscreteScheduler, let's make sure latents are mulitplied by sigmas + # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents * self.scheduler.sigmas[0] diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py index 4980f8c8be06..1b2279de720c 100644 --- a/examples/community/lpw_stable_diffusion.py +++ b/examples/community/lpw_stable_diffusion.py @@ -40,7 +40,7 @@ def parse_prompt_attention(text): """ - Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight. + Parses a string with attention tokens and returns a list of pairs: text and its associated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 @@ -237,9 +237,9 @@ def get_weighted_text_embeddings( r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', - and the embedding tokens corresponding to the words get multipled by a constant, 1.1. + and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. - Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the origional mean. + Also, to regularize the embedding, the weighted embedding would be scaled to preserve the original mean. 
Args: pipe (`DiffusionPipeline`): diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py index 4ca37c0c4ad4..37f03c86f29d 100644 --- a/examples/community/lpw_stable_diffusion_onnx.py +++ b/examples/community/lpw_stable_diffusion_onnx.py @@ -38,7 +38,7 @@ def parse_prompt_attention(text): """ - Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight. + Parses a string with attention tokens and returns a list of pairs: text and its associated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 @@ -236,9 +236,9 @@ def get_weighted_text_embeddings( r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', - and the embedding tokens corresponding to the words get multipled by a constant, 1.1. + and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. - Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the origional mean. + Also, to regularize the embedding, the weighted embedding would be scaled to preserve the original mean. Args: pipe (`DiffusionPipeline`): diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py index 894505654e47..f31ef6361b3b 100644 --- a/src/diffusers/pipeline_utils.py +++ b/src/diffusers/pipeline_utils.py @@ -570,7 +570,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P def components(self) -> Dict[str, Any]: r""" - The `self.compenents` property can be useful to run different pipelines with the same weights and + The `self.components` property can be useful to run different pipelines with the same weights and configurations to not have to re-allocate memory. 
Examples: @@ -588,7 +588,7 @@ def components(self) -> Dict[str, Any]: ``` Returns: - A dictionaly containing all the modules needed to initialize the pipleline. + A dictionary containing all the modules needed to initialize the pipeline. """ components = {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")} expected_modules = set(inspect.signature(self.__init__).parameters.keys()) - set(["self"])