# ControlNet with Flux.1

<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>

FluxControlNetPipeline is an implementation of ControlNet for Flux.1.

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Flux.1 generation. For example, if you provide a depth map, the ControlNet model generates an image that preserves the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, e.g. edges, depth, segmentation, human pose, etc., with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

This ControlNet code is implemented by [The InstantX Team](https://huggingface.co/InstantX). You can find pre-trained checkpoints for Flux-ControlNet in the table below:

| ControlNet type | Developer | Link |
|---|---|---|
| Canny | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny) |
| Depth | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Depth) |
| Union | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union) |

XLabs ControlNets are also supported; they were contributed by the [XLabs team](https://huggingface.co/XLabs-AI). A loading sketch for these checkpoints follows the example below.

| ControlNet type | Developer | Link |
|---|---|---|
| Canny | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-canny-diffusers) |
| Depth | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-depth-diffusers) |
| HED | [The XLabs Team](https://huggingface.co/XLabs-AI) | [Link](https://huggingface.co/XLabs-AI/flux-controlnet-hed-diffusers) |

> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

Examples:

```python
import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline
from diffusers.utils import load_image

base_model = "black-forest-labs/FLUX.1-dev"
controlnet_model = "InstantX/FLUX.1-dev-controlnet-canny"

# Load the canny ControlNet and plug it into the Flux pipeline.
controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
pipe = FluxControlNetPipeline.from_pretrained(
    base_model, controlnet=controlnet, torch_dtype=torch.bfloat16
)
pipe.to("cuda")

control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
prompt = "A girl in city, 25 years old, cool, futuristic"
image = pipe(
    prompt,
    control_image=control_image,
    control_guidance_start=0.2,
    control_guidance_end=0.8,
    controlnet_conditioning_scale=1.0,
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("flux.png")
```
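The XLabs checkpoints in the second table are distributed in diffusers format, so they should load through the same `FluxControlNetModel`/`FluxControlNetPipeline` API as the InstantX example above. The snippet below is a minimal, unofficial sketch rather than a recipe from this page: the repository id comes from the table, while the conditioning scale and output file name are only illustrative.

```python
import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline
from diffusers.utils import load_image

# Assumption: the XLabs diffusers-format checkpoint loads with the standard API.
controlnet = FluxControlNetModel.from_pretrained(
    "XLabs-AI/flux-controlnet-canny-diffusers", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

canny_image = load_image(
    "https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg"
)
image = pipe(
    "A girl in city, 25 years old, cool, futuristic",
    control_image=canny_image,
    controlnet_conditioning_scale=0.7,  # illustrative value; tune per checkpoint
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("flux_xlabs_canny.png")
```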
## FluxControlNetPipeline

`class diffusers.FluxControlNetPipeline(scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel, controlnet: Union[FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel], image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None)` ([source](https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py#L177))

The Flux pipeline for text-to-image generation.

Reference: [https://blackforestlabs.ai/announcing-black-forest-labs/](https://blackforestlabs.ai/announcing-black-forest-labs/)

Parameters:

- **transformer** ([FluxTransformer2DModel](/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel)): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
- **scheduler** ([FlowMatchEulerDiscreteScheduler](/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler)): A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- **vae** ([AutoencoderKL](/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL)): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`CLIPTextModel`): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- **text_encoder_2** (`T5EncoderModel`): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
- **tokenizer** (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
- **tokenizer_2** (`T5TokenizerFast`): Second tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
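The `controlnet` argument in the signature above also accepts a list of ControlNets or a `FluxMultiControlNetModel`. The following is a hedged sketch of that pattern, not an official recipe: it assumes `FluxMultiControlNetModel` is importable from `diffusers.models`, that it wraps a plain list of `FluxControlNetModel`s, and that per-ControlNet images and scales are passed as lists, as described for `control_image` and `controlnet_conditioning_scale` in the `__call__` reference below.

```python
import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline
from diffusers.models import FluxMultiControlNetModel
from diffusers.utils import load_image

# Hypothetical local control images; replace with your own conditioning inputs.
canny_image = load_image("canny.png")
depth_image = load_image("depth.png")

# Wrap several ControlNets so they condition the same generation.
canny = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
depth = FluxControlNetModel.from_pretrained(
    "Shakker-Labs/FLUX.1-dev-ControlNet-Depth", torch_dtype=torch.bfloat16
)
controlnet = FluxMultiControlNetModel([canny, depth])

pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

# With multiple ControlNets, control images and conditioning scales are passed
# as lists with one entry per ControlNet.
image = pipe(
    "A girl in city, 25 years old, cool, futuristic",
    control_image=[canny_image, depth_image],
    controlnet_conditioning_scale=[0.6, 0.5],
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("flux_multi_controlnet.png")
```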
### `__call__`

`__call__(prompt=None, prompt_2=None, negative_prompt=None, negative_prompt_2=None, true_cfg_scale=1.0, height=None, width=None, num_inference_steps=28, sigmas=None, guidance_scale=7.0, control_guidance_start=0.0, control_guidance_end=1.0, control_image=None, control_mode=None, controlnet_conditioning_scale=1.0, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, negative_ip_adapter_image=None, negative_ip_adapter_image_embeds=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, output_type="pil", return_dict=True, joint_attention_kwargs=None, callback_on_step_end=None, callback_on_step_end_tensor_inputs=["latents"], max_sequence_length=512)` ([source](https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py#L677))

Function invoked when calling the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, `prompt_embeds` must be passed instead.
- **prompt_2** (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used instead.
- **height** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. This is set to 1024 by default for the best results.
- **width** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. This is set to 1024 by default for the best results.
- **num_inference_steps** (`int`, *optional*, defaults to 28): The number of denoising steps. More denoising steps usually lead to a higher-quality image at the expense of slower inference.
- **sigmas** (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used.
- **guidance_scale** (`float`, *optional*, defaults to 7.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
- **control_guidance_start** (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying.
- **control_guidance_end** (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying.
- **control_image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition that provides guidance for generation. If the type is `torch.Tensor`, it is passed to the ControlNet as is. A `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `__init__`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet.
- **controlnet_conditioning_scale** (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the transformer. If multiple ControlNets are specified in `__init__`, you can set the corresponding scale as a list.
- **control_mode** (`int` or `List[int]`, *optional*, defaults to `None`): The control mode when applying ControlNet-Union.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **pooled_prompt_embeds** (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from the `prompt` input argument.
- **ip_adapter_image** (`PipelineImageInput`, *optional*): Optional image input to work with IP-Adapters.
- **ip_adapter_image_embeds** (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. Should be a list with the same length as the number of IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not provided, embeddings are computed from the `ip_adapter_image` input argument.
- **negative_ip_adapter_image** (`PipelineImageInput`, *optional*): Optional negative image input to work with IP-Adapters.
- **negative_ip_adapter_image_embeds** (`List[torch.Tensor]`, *optional*): Pre-generated negative image embeddings for IP-Adapter. Should be a list with the same length as the number of IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not provided, embeddings are computed from the `ip_adapter_image` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`): Whether or not to return a `~pipelines.flux.FluxPipelineOutput` instead of a plain tuple.
- **joint_attention_kwargs** (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You can only include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
- **max_sequence_length** (`int`, defaults to 512): Maximum sequence length to use with the `prompt`.

Returns: `~pipelines.flux.FluxPipelineOutput` or `tuple`. A `~pipelines.flux.FluxPipelineOutput` is returned if `return_dict` is `True`, otherwise a `tuple` whose first element is a list with the generated images.

For a full end-to-end example, see the canny ControlNet snippet at the top of this page.
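As an illustration of the `callback_on_step_end` hook described above, the following minimal sketch (reusing `pipe`, `prompt`, and `control_image` from the first example) logs the intermediate latents at each step. It assumes the callback should return its `callback_kwargs` dict so the pipeline keeps using the (possibly modified) tensors.

```python
def log_latents(pipeline, step, timestep, callback_kwargs):
    # `latents` is available here because it is listed in callback_on_step_end_tensor_inputs.
    latents = callback_kwargs["latents"]
    print(f"step {step:3d} | timestep {timestep} | latents std {latents.std().item():.4f}")
    # Return the tensors (modified or not) so the pipeline keeps using them.
    return callback_kwargs

image = pipe(
    prompt,
    control_image=control_image,
    num_inference_steps=28,
    guidance_scale=3.5,
    callback_on_step_end=log_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```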
`}}),I=new et({props:{anchor:"diffusers.FluxControlNetPipeline.__call__.example",$$slots:{default:[nt]},$$scope:{ctx:te}}}),D=new ve({props:{name:"encode_prompt",anchor:"diffusers.FluxControlNetPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>):
torch device`,name:"device"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.FluxControlNetPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py#L341"}}),B=new Fe({props:{title:"FluxPipelineOutput",local:"diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput",headingTag:"h2"}}),H=new ve({props:{name:"class diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput",anchor:"diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"}],parametersDescription:[{anchor:"diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>torch.Tensor</code> or <code>np.ndarray</code>) &#x2014;
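A small sketch of how `encode_prompt` can be used to precompute embeddings once and reuse them across several calls, again reusing `pipe` and `control_image` from the first example. The three-value unpacking assumes the method returns `(prompt_embeds, pooled_prompt_embeds, text_ids)`, which matches the current diffusers source but should be treated as an assumption rather than part of the documented signature.

```python
import torch

# Precompute text embeddings once, then reuse them for several generations.
# Assumption: encode_prompt returns (prompt_embeds, pooled_prompt_embeds, text_ids).
prompt_embeds, pooled_prompt_embeds, text_ids = pipe.encode_prompt(
    prompt="A girl in city, 25 years old, cool, futuristic",
    prompt_2=None,
    device="cuda",
    num_images_per_prompt=1,
    max_sequence_length=512,
)

for seed in (0, 1, 2):
    image = pipe(
        prompt=None,  # skip re-encoding; use the cached embeddings instead
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        control_image=control_image,
        num_inference_steps=28,
        guidance_scale=3.5,
        generator=torch.Generator("cuda").manual_seed(seed),
    ).images[0]
    image.save(f"flux_seed_{seed}.png")
```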
## FluxPipelineOutput

`class diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput(images: Union[List[PIL.Image.Image], np.ndarray])` ([source](https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_output.py#L12))

Output class for Flux image generation pipelines.

Parameters:

- **images** (`List[PIL.Image.Image]`, `np.ndarray`, or `torch.Tensor`): List of denoised PIL images of length `batch_size`, or a numpy array or torch tensor of shape `(batch_size, height, width, num_channels)`. PIL images and numpy arrays represent the denoised images of the diffusion pipeline. Torch tensors can represent either the denoised images or the intermediate latents ready to be passed to the decoder.
