Buckets:

rtrm's picture
download
raw
197 kB
import{s as Ea,o as Na,n as Kt}from"../chunks/scheduler.8c3d61f6.js";import{S as Qa,i as $a,g as i,s as a,r,A as Ya,h as p,f as n,c as l,j as T,u as c,x as y,k as U,y as h,a as s,v as d,d as m,t as g,w as u}from"../chunks/index.da70eac4.js";import{D as Z}from"../chunks/Docstring.2187c15d.js";import{C as J}from"../chunks/CodeBlock.a9c4becf.js";import{E as At}from"../chunks/ExampleCodeBlock.56cd5e98.js";import{H as v,E as Pa}from"../chunks/getInferenceSnippets.676f6ee5.js";function Ha(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTJDJTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwcHRfdG9fcGlsJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCdhJTIwcGhvdG8lMjBvZiUyMGElMjBrYW5nYXJvbyUyMHdlYXJpbmclMjBhbiUyMG9yYW5nZSUyMGhvb2RpZSUyMGFuZCUyMGJsdWUlMjBzdW5nbGFzc2VzJTIwc3RhbmRpbmclMjBpbiUyMGZyb250JTIwb2YlMjB0aGUlMjBlaWZmZWwlMjB0b3dlciUyMGhvbGRpbmclMjBhJTIwc2lnbiUyMHRoYXQlMjBzYXlzJTIwJTIydmVyeSUyMGRlZXAlMjBsZWFybmluZyUyMiclMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMikuaW1hZ2VzJTBBJTBBJTIzJTIwc2F2ZSUyMGludGVybWVkaWF0ZSUyMGltYWdlJTBBcGlsX2ltYWdlJTIwJTNEJTIwcHRfdG9fcGlsKGltYWdlKSUwQXBpbF9pbWFnZSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSS5wbmclMjIpJTBBJTBBc3VwZXJfcmVzXzFfcGlwZSUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNi
UwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzYWZldHlfbW9kdWxlcyUyMCUzRCUyMCU3QiUwQSUyMCUyMCUyMCUyMCUyMmZlYXR1cmVfZXh0cmFjdG9yJTIyJTNBJTIwcGlwZS5mZWF0dXJlX2V4dHJhY3RvciUyQyUwQSUyMCUyMCUyMCUyMCUyMnNhZmV0eV9jaGVja2VyJTIyJTNBJTIwcGlwZS5zYWZldHlfY2hlY2tlciUyQyUwQSUyMCUyMCUyMCUyMCUyMndhdGVybWFya2VyJTIyJTNBJTIwcGlwZS53YXRlcm1hcmtlciUyQyUwQSU3RCUwQXN1cGVyX3Jlc18yX3BpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXg0LXVwc2NhbGVyJTIyJTJDJTIwKipzYWZldHlfbW9kdWxlcyUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMl9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMl9waXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&#x27;a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says &quot;very deep learning&quot;&#x27;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">&quot;pt&quot;</span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">&quot;pt&quot;</span>
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>safety_modules = {
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;feature_extractor&quot;</span>: pipe.feature_extractor,
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;safety_checker&quot;</span>: pipe.safety_checker,
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;watermarker&quot;</span>: pipe.watermarker,
<span class="hljs-meta">... </span>}
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_2_pipe = DiffusionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stabilityai/stable-diffusion-x4-upscaler&quot;</span>, **safety_modules, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_2_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_2_pipe(
<span class="hljs-meta">... </span> prompt=prompt,
<span class="hljs-meta">... </span> image=image,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function za(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTJDJTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwcHRfdG9fcGlsJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCdhJTIwcGhvdG8lMjBvZiUyMGElMjBrYW5nYXJvbyUyMHdlYXJpbmclMjBhbiUyMG9yYW5nZSUyMGhvb2RpZSUyMGFuZCUyMGJsdWUlMjBzdW5nbGFzc2VzJTIwc3RhbmRpbmclMjBpbiUyMGZyb250JTIwb2YlMjB0aGUlMjBlaWZmZWwlMjB0b3dlciUyMGhvbGRpbmclMjBhJTIwc2lnbiUyMHRoYXQlMjBzYXlzJTIwJTIydmVyeSUyMGRlZXAlMjBsZWFybmluZyUyMiclMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMikuaW1hZ2VzJTBBJTBBJTIzJTIwc2F2ZSUyMGludGVybWVkaWF0ZSUyMGltYWdlJTBBcGlsX2ltYWdlJTIwJTNEJTIwcHRfdG9fcGlsKGltYWdlKSUwQXBpbF9pbWFnZSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSS5wbmclMjIpJTBBJTBBc3VwZXJfcmVzXzFfcGlwZSUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFz
dXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&#x27;a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says &quot;very deep learning&quot;&#x27;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">&quot;pt&quot;</span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function qa(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZzdGFibGUtZGlmZnVzaW9uJTJGbWFpbiUyRmFzc2V0cyUyRnN0YWJsZS1zYW1wbGVzJTJGaW1nMmltZyUyRnNrZXRjaC1tb3VudGFpbnMtaW5wdXQuanBnJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwSW1hZ2Uub3BlbihCeXRlc0lPKHJlc3BvbnNlLmNvbnRlbnQpKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMG9yaWdpbmFsX2ltYWdlLnJlc2l6ZSgoNzY4JTJDJTIwNTEyKSklMEElMEFwaXBlJTIwJTNEJTIwSUZJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwZmFudGFzeSUyMGxhbmRzY2FwZSUyMGluJTIwc3R5bGUlMjBtaW5lY3JhZnQlMjIlMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRp
dmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUwQSUyMyUyMHNhdmUlMjBpbnRlcm1lZGlhdGUlMjBpbWFnZSUwQXBpbF9pbWFnZSUyMCUzRCUyMHB0X3RvX3BpbChpbWFnZSklMEFwaWxfaW1hZ2UlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQXN1cGVyX3Jlc18xX3BpcGUlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXN1cGVyX3Jlc18xX3BpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFpbWFnZSUyMCUzRCUyMHN1cGVyX3Jlc18xX3BpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>))
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFImg2ImgPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>,
<span class="hljs-meta">... </span> variant=<span class="hljs-string">&quot;fp16&quot;</span>,
<span class="hljs-meta">... </span> torch_dtype=torch.float16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;A fantasy landscape in style minecraft&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(
<span class="hljs-meta">... </span> image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span> output_type=<span class="hljs-string">&quot;pt&quot;</span>,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>,
<span class="hljs-meta">... </span> text_encoder=<span class="hljs-literal">None</span>,
<span class="hljs-meta">... </span> variant=<span class="hljs-string">&quot;fp16&quot;</span>,
<span class="hljs-meta">... </span> torch_dtype=torch.float16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image,
<span class="hljs-meta">... </span> original_image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function La(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZzdGFibGUtZGlmZnVzaW9uJTJGbWFpbiUyRmFzc2V0cyUyRnN0YWJsZS1zYW1wbGVzJTJGaW1nMmltZyUyRnNrZXRjaC1tb3VudGFpbnMtaW5wdXQuanBnJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwSW1hZ2Uub3BlbihCeXRlc0lPKHJlc3BvbnNlLmNvbnRlbnQpKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMG9yaWdpbmFsX2ltYWdlLnJlc2l6ZSgoNzY4JTJDJTIwNTEyKSklMEElMEFwaXBlJTIwJTNEJTIwSUZJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwZmFudGFzeSUyMGxhbmRzY2FwZSUyMGluJTIwc3R5bGUlMjBtaW5lY3JhZnQlMjIlMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRp
dmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUwQSUyMyUyMHNhdmUlMjBpbnRlcm1lZGlhdGUlMjBpbWFnZSUwQXBpbF9pbWFnZSUyMCUzRCUyMHB0X3RvX3BpbChpbWFnZSklMEFwaWxfaW1hZ2UlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQXN1cGVyX3Jlc18xX3BpcGUlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXN1cGVyX3Jlc18xX3BpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFpbWFnZSUyMCUzRCUyMHN1cGVyX3Jlc18xX3BpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>))
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFImg2ImgPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>,
<span class="hljs-meta">... </span> variant=<span class="hljs-string">&quot;fp16&quot;</span>,
<span class="hljs-meta">... </span> torch_dtype=torch.float16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;A fantasy landscape in style minecraft&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(
<span class="hljs-meta">... </span> image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span> output_type=<span class="hljs-string">&quot;pt&quot;</span>,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>,
<span class="hljs-meta">... </span> text_encoder=<span class="hljs-literal">None</span>,
<span class="hljs-meta">... </span> variant=<span class="hljs-string">&quot;fp16&quot;</span>,
<span class="hljs-meta">... </span> torch_dtype=torch.float16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image,
<span class="hljs-meta">... </span> original_image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Da(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmRpZmZ1c2VycyUyRmRvY3MtaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZpZiUyRnBlcnNvbi5wbmclMjIlMEFyZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBb3JpZ2luYWxfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UlMEElMEF1cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZkaWZmdXNlcnMlMkZkb2NzLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGaWYlMkZnbGFzc2VzX21hc2sucG5nJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpJTBBbWFza19pbWFnZSUyMCUzRCUyMG1hc2tfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJibHVlJTIwc3VuZ2xhc3NlcyUyMiUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29kZV9w
cm9tcHQocHJvbXB0KSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBpbWFnZSUzRG9yaWdpbmFsX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzdXBlcl9yZXNfMV9waXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = original_image
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content))
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_image = mask_image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFInpaintingPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;blue sunglasses&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(
<span class="hljs-meta">... </span> image=original_image,
<span class="hljs-meta">... </span> mask_image=mask_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span> output_type=<span class="hljs-string">&quot;pt&quot;</span>,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image,
<span class="hljs-meta">... </span> mask_image=mask_image,
<span class="hljs-meta">... </span> original_image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Aa(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmRpZmZ1c2VycyUyRmRvY3MtaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZpZiUyRnBlcnNvbi5wbmclMjIlMEFyZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBb3JpZ2luYWxfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UlMEElMEF1cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZkaWZmdXNlcnMlMkZkb2NzLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGaWYlMkZnbGFzc2VzX21hc2sucG5nJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpJTBBbWFza19pbWFnZSUyMCUzRCUyMG1hc2tfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJibHVlJTIwc3VuZ2xhc3NlcyUyMiUwQSUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29k
ZV9wcm9tcHQocHJvbXB0KSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBpbWFnZSUzRG9yaWdpbmFsX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzdXBlcl9yZXNfMV9waXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> requests
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>original_image = original_image
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>response = requests.get(url)
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content))
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_image = mask_image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = IFInpaintingPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;blue sunglasses&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(
<span class="hljs-meta">... </span> image=original_image,
<span class="hljs-meta">... </span> mask_image=mask_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span> output_type=<span class="hljs-string">&quot;pt&quot;</span>,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># save intermediate image</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image = pt_to_pil(image)
<span class="hljs-meta">&gt;&gt;&gt; </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_I.png&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>super_res_1_pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>image = super_res_1_pipe(
<span class="hljs-meta">... </span> image=image,
<span class="hljs-meta">... </span> mask_image=mask_image,
<span class="hljs-meta">... </span> original_image=original_image,
<span class="hljs-meta">... </span> prompt_embeds=prompt_embeds,
<span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds,
<span class="hljs-meta">... </span>).images
<span class="hljs-meta">&gt;&gt;&gt; </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">&quot;./if_stage_II.png&quot;</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Kt,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Ka(j){let f,w,b,_,M,o,I,As='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&amp;logo=apple&amp;logoColor=white%22"/>',en,se,tn,ae,Ks=`DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
The model is a modular composed of a frozen text encoder and three cascaded pixel diffusion modules:`,nn,le,Os=`<li>Stage 1: a base model that generates 64x64 px image based on text prompt,</li> <li>Stage 2: a 64x64 px =&gt; 256x256 px super-resolution model, and</li> <li>Stage 3: a 256x256 px =&gt; 1024x1024 px super-resolution model
Stage 1 and Stage 2 utilize a frozen text encoder based on the T5 transformer to extract text embeddings, which are then fed into a UNet architecture enhanced with cross-attention and attention pooling.
Stage 3 is <a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler" rel="nofollow">Stability AI’s x4 Upscaling model</a>.
The result is a highly efficient model that outperforms current state-of-the-art models, achieving a zero-shot FID score of 6.66 on the COCO dataset.
Our work underscores the potential of larger UNet architectures in the first stage of cascaded diffusion models and depicts a promising future for text-to-image synthesis.</li>`,sn,oe,an,ie,ea="Before you can use IF, you need to accept its usage conditions. To do so:",ln,pe,ta='<li>Make sure to have a <a href="https://huggingface.co/join" rel="nofollow">Hugging Face account</a> and be logged in.</li> <li>Accept the license on the model card of <a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0" rel="nofollow">DeepFloyd/IF-I-XL-v1.0</a>. Accepting the license on the stage I model card will auto accept for the other IF models.</li> <li>Make sure to login locally. Install <code>huggingface_hub</code>:</li>',on,re,pn,ce,na="run the login function in a Python shell:",rn,de,cn,me,sa='and enter your <a href="https://huggingface.co/docs/hub/security-tokens#what-are-user-access-tokens" rel="nofollow">Hugging Face Hub access token</a>.',dn,ge,aa="Next we install <code>diffusers</code> and dependencies:",mn,ue,gn,fe,la="The following sections give more in-detail examples of how to use IF. 
Specifically:",un,he,oa='<li><a href="#text-to-image-generation">Text-to-Image Generation</a></li> <li><a href="#text-guided-image-to-image-generation">Image-to-Image Generation</a></li> <li><a href="#text-guided-inpainting-generation">Inpainting</a></li> <li><a href="#converting-between-different-pipelines">Reusing model weights</a></li> <li><a href="#optimizing-for-speed">Speed optimization</a></li> <li><a href="#optimizing-for-memory">Memory optimization</a></li>',fn,_e,ia="<strong>Available checkpoints</strong>",hn,ye,pa='<li><p><em>Stage-1</em></p> <ul><li><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0" rel="nofollow">DeepFloyd/IF-I-XL-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-I-L-v1.0" rel="nofollow">DeepFloyd/IF-I-L-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-I-M-v1.0" rel="nofollow">DeepFloyd/IF-I-M-v1.0</a></li></ul></li> <li><p><em>Stage-2</em></p> <ul><li><a href="https://huggingface.co/DeepFloyd/IF-II-L-v1.0" rel="nofollow">DeepFloyd/IF-II-L-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-II-M-v1.0" rel="nofollow">DeepFloyd/IF-II-M-v1.0</a></li></ul></li> <li><p><em>Stage-3</em></p> <ul><li><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler" rel="nofollow">stabilityai/stable-diffusion-x4-upscaler</a></li></ul></li>',_n,Me,ra='<strong>Google Colab</strong> <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/deepfloyd_if_free_tier_google_colab.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>',yn,be,Mn,Ie,ca='By default diffusers makes use of <a href="../../optimization/memory#model-offloading">model cpu offloading</a> to run the whole IF pipeline with as little as 14 GB of VRAM.',bn,we,In,Je,wn,Ue,da=`The same IF model weights can be used for text-guided image-to-image translation or image variation.
In this case just make sure to load the weights using the <a href="/docs/diffusers/pr_12262/en/api/pipelines/deepfloyd_if#diffusers.IFImg2ImgPipeline">IFImg2ImgPipeline</a> and <a href="/docs/diffusers/pr_12262/en/api/pipelines/deepfloyd_if#diffusers.IFImg2ImgSuperResolutionPipeline">IFImg2ImgSuperResolutionPipeline</a> pipelines.`,Jn,Te,ma=`<strong>Note</strong>: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines
without loading them twice by making use of the <a href="/docs/diffusers/pr_12262/en/api/pipelines/overview#diffusers.DiffusionPipeline.components">components</a> argument as explained <a href="#converting-between-different-pipelines">here</a>.`,Un,Ze,Tn,ve,Zn,je,ga=`The same IF model weights can be used for text-guided image-to-image translation or image variation.
In this case just make sure to load the weights using the <a href="/docs/diffusers/pr_12262/en/api/pipelines/deepfloyd_if#diffusers.IFInpaintingPipeline">IFInpaintingPipeline</a> and <a href="/docs/diffusers/pr_12262/en/api/pipelines/deepfloyd_if#diffusers.IFInpaintingSuperResolutionPipeline">IFInpaintingSuperResolutionPipeline</a> pipelines.`,vn,We,ua=`<strong>Note</strong>: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines
without loading them twice by making use of the <code>~DiffusionPipeline.components()</code> function as explained <a href="#converting-between-different-pipelines">here</a>.`,jn,Be,Wn,Fe,Bn,Xe,fa="In addition to being loaded with <code>from_pretrained</code>, Pipelines can also be loaded directly from each other.",Fn,ke,Xn,Ge,kn,Ce,ha="The simplest optimization to run IF faster is to move all model components to the GPU.",Gn,Re,Cn,Ve,_a="You can also run the diffusion process for a shorter number of timesteps.",Rn,xe,ya="This can either be done with the <code>num_inference_steps</code> argument:",Vn,Se,xn,Ee,Ma="Or with the <code>timesteps</code> argument:",Sn,Ne,En,Qe,ba=`When doing image variation or inpainting, you can also decrease the number of timesteps
with the strength argument. The strength argument is the amount of noise to add to the input image which also determines how many steps to run in the denoising process.
A smaller number will vary the image less but run faster.`,Nn,$e,Qn,Ye,Ia=`You can also use <a href="../../optimization/fp16#torchcompile"><code>torch.compile</code></a>. Note that we have not exhaustively tested <code>torch.compile</code>
with IF and it might not give expected results.`,$n,Pe,Yn,He,Pn,ze,wa="When optimizing for GPU memory, we can use the standard diffusers CPU offloading APIs.",Hn,qe,Ja="Either the model based CPU offloading,",zn,Le,qn,De,Ua="or the more aggressive layer based CPU offloading.",Ln,Ae,Dn,Ke,Ta="Additionally, T5 can be loaded in 8bit precision",An,Oe,Kn,et,Za=`For CPU RAM constrained machines like Google Colab free tier where we can’t load all model components to the CPU at once, we can manually only load the pipeline with
the text encoder or UNet when the respective model components are needed.`,On,tt,es,nt,ts,st,va='<thead><tr><th>Pipeline</th> <th>Tasks</th> <th align="center">Colab</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py" rel="nofollow">pipeline_if.py</a></td> <td><em>Text-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py" rel="nofollow">pipeline_if_superresolution.py</a></td> <td><em>Text-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py" rel="nofollow">pipeline_if_img2img.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py" rel="nofollow">pipeline_if_img2img_superresolution.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py" rel="nofollow">pipeline_if_inpainting.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py" rel="nofollow">pipeline_if_inpainting_superresolution.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr></tbody>',ns,at,ss,W,lt,Is,C,ot,ws,Ft,ja="Function invoked when calling the pipeline for generation.",Js,P,Us,H,it,Ts,Xt,Wa="Encodes the prompt into text encoder hidden states.",as,pt,ls,B,rt,Zs,R,ct,vs,kt,Ba="Function invoked 
when calling the pipeline for generation.",js,z,Ws,q,dt,Bs,Gt,Fa="Encodes the prompt into text encoder hidden states.",os,mt,is,F,gt,Fs,V,ut,Xs,Ct,Xa="Function invoked when calling the pipeline for generation.",ks,L,Gs,D,ft,Cs,Rt,ka="Encodes the prompt into text encoder hidden states.",ps,ht,rs,X,_t,Rs,x,yt,Vs,Vt,Ga="Function invoked when calling the pipeline for generation.",xs,A,Ss,K,Mt,Es,xt,Ca="Encodes the prompt into text encoder hidden states.",cs,bt,ds,k,It,Ns,S,wt,Qs,St,Ra="Function invoked when calling the pipeline for generation.",$s,O,Ys,ee,Jt,Ps,Et,Va="Encodes the prompt into text encoder hidden states.",ms,Ut,gs,G,Tt,Hs,E,Zt,zs,Nt,xa="Function invoked when calling the pipeline for generation.",qs,te,Ls,ne,vt,Ds,Qt,Sa="Encodes the prompt into text encoder hidden states.",us,jt,fs,Ot,hs;return M=new v({props:{title:"DeepFloyd IF",local:"deepfloyd-if",headingTag:"h1"}}),se=new v({props:{title:"Overview",local:"overview",headingTag:"h2"}}),oe=new v({props:{title:"Usage",local:"usage",headingTag:"h2"}}),re=new J({props:{code:"cGlwJTIwaW5zdGFsbCUyMGh1Z2dpbmdmYWNlX2h1YiUyMC0tdXBncmFkZQ==",highlighted:"pip install huggingface_hub --upgrade",wrap:!1}}),de=new J({props:{code:"ZnJvbSUyMGh1Z2dpbmdmYWNlX2h1YiUyMGltcG9ydCUyMGxvZ2luJTBBJTBBbG9naW4oKQ==",highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> login
login()`,wrap:!1}}),ue=new J({props:{code:"cGlwJTIwaW5zdGFsbCUyMC1xJTIwZGlmZnVzZXJzJTIwYWNjZWxlcmF0ZSUyMHRyYW5zZm9ybWVycw==",highlighted:"pip install -q diffusers accelerate transformers",wrap:!1}}),be=new v({props:{title:"Text-to-Image Generation",local:"text-to-image-generation",headingTag:"h3"}}),we=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMHB0X3RvX3BpbCUyQyUyMG1ha2VfaW1hZ2VfZ3JpZCUwQWltcG9ydCUyMHRvcmNoJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFzdGFnZV8xLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBJTIzJTIwc3RhZ2UlMjAyJTBBc3RhZ2VfMiUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJEZWVwRmxveWQlMkZJRi1JSS1MLXYxLjAlMjIlMkMlMjB0ZXh0X2VuY29kZXIlM0ROb25lJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBc3RhZ2VfMi5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXNhZmV0eV9tb2R1bGVzJTIwJTNEJTIwJTdCJTBBJTIwJTIwJTIwJTIwJTIyZmVhdHVyZV9leHRyYWN0b3IlMjIlM0ElMjBzdGFnZV8xLmZlYXR1cmVfZXh0cmFjdG9yJTJDJTBBJTIwJTIwJTIwJTIwJTIyc2FmZXR5X2NoZWNrZXIlMjIlM0ElMjBzdGFnZV8xLnNhZmV0eV9jaGVja2VyJTJDJTBBJTIwJTIwJTIwJTIwJTIyd2F0ZXJtYXJrZXIlMjIlM0ElMjBzdGFnZV8xLndhdGVybWFya2VyJTJDJTBBJTdEJTBBc3RhZ2VfMyUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teDQtdXBzY2FsZXIlMjIlMkMlMjAqKnNhZmV0eV9tb2R1bGVzJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXN0YWdlXzMuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAnYSUyMHBob3RvJTIwb2YlMjBhJTIwa2FuZ2Fyb28lMjB3ZWFyaW5nJTIwYW4lMjBvcmFuZ2UlMjBob29kaWUlMjBhbmQlMjBibHVlJTIwc3VuZ2xhc3NlcyUyMHN0YW5kaW5nJTIwaW4lMjBmcm9udCUyMG9mJTIwdGhlJTIwZWlmZmVsJTIwdG93ZXIlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCUyMnZlcnklMjBkZWVwJTIw
bGVhcm5pbmclMjInJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUyMGdlbmVyYXRvciUzRGdlbmVyYXRvciUyQyUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMEEpLmltYWdlcyUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzJfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMiglMEElMjAlMjAlMjAlMjBpbWFnZSUzRHN0YWdlXzFfb3V0cHV0JTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMiUyQyUwQSkuaW1hZ2VzJTBBJTIzcHRfdG9fcGlsKHN0YWdlXzJfb3V0cHV0KSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSUkucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXN0YWdlXzNfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMyhwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRHN0YWdlXzJfb3V0cHV0JTJDJTIwbm9pc2VfbGV2ZWwlM0QxMDAlMkMlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IpLmltYWdlcyUwQSUyM3N0YWdlXzNfb3V0cHV0JTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSUkucG5nJTIyKSUwQW1ha2VfaW1hZ2VfZ3JpZCglNUJwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RCUyQyUyMHB0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVEJTJDJTIwc3RhZ2VfM19vdXRwdXQlNUIwJTVEJTVEJTJDJTIwcm93cyUzRDElMkMlMjByb3dzJTNEMyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, make_image_grid
<span class="hljs-keyword">import</span> torch
<span class="hljs-comment"># stage 1</span>
stage_1 = DiffusionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()
<span class="hljs-comment"># stage 2</span>
stage_2 = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()
<span class="hljs-comment"># stage 3</span>
safety_modules = {
<span class="hljs-string">&quot;feature_extractor&quot;</span>: stage_1.feature_extractor,
<span class="hljs-string">&quot;safety_checker&quot;</span>: stage_1.safety_checker,
<span class="hljs-string">&quot;watermarker&quot;</span>: stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-x4-upscaler&quot;</span>, **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()
prompt = <span class="hljs-string">&#x27;a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says &quot;very deep learning&quot;&#x27;</span>
generator = torch.manual_seed(<span class="hljs-number">1</span>)
<span class="hljs-comment"># text embeds</span>
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
<span class="hljs-comment"># stage 1</span>
stage_1_output = stage_1(
prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type=<span class="hljs-string">&quot;pt&quot;</span>
).images
<span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save(&quot;./if_stage_I.png&quot;)</span>
<span class="hljs-comment"># stage 2</span>
stage_2_output = stage_2(
image=stage_1_output,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
generator=generator,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
).images
<span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save(&quot;./if_stage_II.png&quot;)</span>
<span class="hljs-comment"># stage 3</span>
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, noise_level=<span class="hljs-number">100</span>, generator=generator).images
<span class="hljs-comment">#stage_3_output[0].save(&quot;./if_stage_III.png&quot;)</span>
make_image_grid([pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">3</span>)`,wrap:!1}}),Je=new v({props:{title:"Text Guided Image-to-Image Generation",local:"text-guided-image-to-image-generation",headingTag:"h3"}}),Ze=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBsb2FkX2ltYWdlJTJDJTIwbWFrZV9pbWFnZV9ncmlkJTBBaW1wb3J0JTIwdG9yY2glMEElMEElMjMlMjBkb3dubG9hZCUyMGltYWdlJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRnN0YWJsZS1kaWZmdXNpb24lMkZtYWluJTJGYXNzZXRzJTJGc3RhYmxlLXNhbXBsZXMlMkZpbWcyaW1nJTJGc2tldGNoLW1vdW50YWlucy1pbnB1dC5qcGclMjIlMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UucmVzaXplKCg3NjglMkMlMjA1MTIpKSUwQSUwQSUyMyUyMHN0YWdlJTIwMSUwQXN0YWdlXzElMjAlM0QlMjBJRkltZzJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBc3RhZ2VfMS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzIlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXN0YWdlXzIuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEElMjMlMjBzdGFnZSUyMDMlMEFzYWZldHlfbW9kdWxlcyUyMCUzRCUyMCU3QiUwQSUyMCUyMCUyMCUyMCUyMmZlYXR1cmVfZXh0cmFjdG9yJTIyJTNBJTIwc3RhZ2VfMS5mZWF0dXJlX2V4dHJhY3RvciUyQyUwQSUyMCUyMCUyMCUyMCUyMnNhZmV0eV9jaGVja2VyJTIyJTNBJTIwc3RhZ2VfMS5zYWZldHlfY2hlY2tlciUyQyUwQSUyMCUyMCUyMCUyMCUyMndhdGV
ybWFya2VyJTIyJTNBJTIwc3RhZ2VfMS53YXRlcm1hcmtlciUyQyUwQSU3RCUwQXN0YWdlXzMlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXg0LXVwc2NhbGVyJTIyJTJDJTIwKipzYWZldHlfbW9kdWxlcyUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdGFnZV8zLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMGZhbnRhc3klMjBsYW5kc2NhcGUlMjBpbiUyMHN0eWxlJTIwbWluZWNyYWZ0JTIyJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMjNwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEElMjMlMjBzdGFnZSUyMDIlMEFzdGFnZV8yX291dHB1dCUyMCUzRCUyMHN0YWdlXzIoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RzdGFnZV8xX291dHB1dCUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMjNwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIpJTBBJTBBJTIzJTIwc3RhZ2UlMjAzJTBBc3RhZ2VfM19vdXRwdXQlMjAlM0QlMjBzdGFnZV8zKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEc3RhZ2VfMl9vdXRwdXQlMkMlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMjBub2lzZV9sZXZlbCUzRDEwMCkuaW1hZ2VzJTBBJTIzc3RhZ2VfM19vdXRwdXQlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0lJSS5wbmclMjIpJTBBbWFrZV9pbWFnZV9ncmlkKCU1Qm9yaWdpbmFsX2ltYWdlJTJDJTIwcHR
fdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQlMkMlMjBwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RCUyQyUyMHN0YWdlXzNfb3V0cHV0JTVCMCU1RCU1RCUyQyUyMHJvd3MlM0QxJTJDJTIwcm93cyUzRDQp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, load_image, make_image_grid
<span class="hljs-keyword">import</span> torch
<span class="hljs-comment"># download image</span>
url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg&quot;</span>
original_image = load_image(url)
original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>))
<span class="hljs-comment"># stage 1</span>
stage_1 = IFImg2ImgPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()
<span class="hljs-comment"># stage 2</span>
stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()
<span class="hljs-comment"># stage 3</span>
safety_modules = {
<span class="hljs-string">&quot;feature_extractor&quot;</span>: stage_1.feature_extractor,
<span class="hljs-string">&quot;safety_checker&quot;</span>: stage_1.safety_checker,
<span class="hljs-string">&quot;watermarker&quot;</span>: stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-x4-upscaler&quot;</span>, **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()
prompt = <span class="hljs-string">&quot;A fantasy landscape in style minecraft&quot;</span>
generator = torch.manual_seed(<span class="hljs-number">1</span>)
<span class="hljs-comment"># text embeds</span>
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
<span class="hljs-comment"># stage 1</span>
stage_1_output = stage_1(
image=original_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
generator=generator,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
).images
<span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save(&quot;./if_stage_I.png&quot;)</span>
<span class="hljs-comment"># stage 2</span>
stage_2_output = stage_2(
image=stage_1_output,
original_image=original_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
generator=generator,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
).images
<span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save(&quot;./if_stage_II.png&quot;)</span>
<span class="hljs-comment"># stage 3</span>
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=<span class="hljs-number">100</span>).images
<span class="hljs-comment">#stage_3_output[0].save(&quot;./if_stage_III.png&quot;)</span>
make_image_grid([original_image, pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">4</span>)`,wrap:!1}}),ve=new v({props:{title:"Text Guided Inpainting Generation",local:"text-guided-inpainting-generation",headingTag:"h3"}}),Be=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBsb2FkX2ltYWdlJTJDJTIwbWFrZV9pbWFnZV9ncmlkJTBBaW1wb3J0JTIwdG9yY2glMEElMEElMjMlMjBkb3dubG9hZCUyMGltYWdlJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGZGlmZnVzZXJzJTJGZG9jcy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmlmJTJGcGVyc29uLnBuZyUyMiUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSh1cmwpJTBBJTBBJTIzJTIwZG93bmxvYWQlMjBtYXNrJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGZGlmZnVzZXJzJTJGZG9jcy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmlmJTJGZ2xhc3Nlc19tYXNrLnBuZyUyMiUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKHVybCklMEElMEElMjMlMjBzdGFnZSUyMDElMEFzdGFnZV8xJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXN0YWdlXzEuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEElMjMlMjBzdGFnZSUyMDIlMEFzdGFnZV8yJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdGFnZV8yLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBJTIzJTIwc3RhZ2UlMjAzJTBBc2FmZXR5X21vZHVsZXMlMjAlM0QlMjAlN0IlMEElMjAlMjAlMjAlMjAlMjJmZWF0dXJlX2V4dHJhY3RvciUyMiUzQSUyMHN0YWdlXzEuZmV
hdHVyZV9leHRyYWN0b3IlMkMlMEElMjAlMjAlMjAlMjAlMjJzYWZldHlfY2hlY2tlciUyMiUzQSUyMHN0YWdlXzEuc2FmZXR5X2NoZWNrZXIlMkMlMEElMjAlMjAlMjAlMjAlMjJ3YXRlcm1hcmtlciUyMiUzQSUyMHN0YWdlXzEud2F0ZXJtYXJrZXIlMkMlMEElN0QlMEFzdGFnZV8zJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi14NC11cHNjYWxlciUyMiUyQyUyMCoqc2FmZXR5X21vZHVsZXMlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBc3RhZ2VfMy5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmJsdWUlMjBzdW5nbGFzc2VzJTIyJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRGdlbmVyYXRvciUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzJfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMiglMEElMjAlMjAlMjAlMjBpbWFnZSUzRHN0YWdlXzFfb3V0cHV0JTJDJTBBJTIwJTIwJTIwJTIwb3JpZ2luYWxfaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMiUyQyUwQSkuaW1hZ2VzJTBBJTIzcHRfdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSUkucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXN0YWdlXzNfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMyhwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRHN0YWdlXzJfb3V0cHV0JTJDJTI
wZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTIwbm9pc2VfbGV2ZWwlM0QxMDApLmltYWdlcyUwQSUyM3N0YWdlXzNfb3V0cHV0JTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSUkucG5nJTIyKSUwQW1ha2VfaW1hZ2VfZ3JpZCglNUJvcmlnaW5hbF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlMkMlMjBwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RCUyQyUyMHB0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVEJTJDJTIwc3RhZ2VfM19vdXRwdXQlNUIwJTVEJTVEJTJDJTIwcm93cyUzRDElMkMlMjByb3dzJTNENSk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, load_image, make_image_grid
<span class="hljs-keyword">import</span> torch
<span class="hljs-comment"># download image</span>
url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png&quot;</span>
original_image = load_image(url)
<span class="hljs-comment"># download mask</span>
url = <span class="hljs-string">&quot;https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png&quot;</span>
mask_image = load_image(url)
<span class="hljs-comment"># stage 1</span>
stage_1 = IFInpaintingPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()
<span class="hljs-comment"># stage 2</span>
stage_2 = IFInpaintingSuperResolutionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()
<span class="hljs-comment"># stage 3</span>
safety_modules = {
<span class="hljs-string">&quot;feature_extractor&quot;</span>: stage_1.feature_extractor,
<span class="hljs-string">&quot;safety_checker&quot;</span>: stage_1.safety_checker,
<span class="hljs-string">&quot;watermarker&quot;</span>: stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-x4-upscaler&quot;</span>, **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()
prompt = <span class="hljs-string">&quot;blue sunglasses&quot;</span>
generator = torch.manual_seed(<span class="hljs-number">1</span>)
<span class="hljs-comment"># text embeds</span>
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)
<span class="hljs-comment"># stage 1</span>
stage_1_output = stage_1(
image=original_image,
mask_image=mask_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
generator=generator,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
).images
<span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save(&quot;./if_stage_I.png&quot;)</span>
<span class="hljs-comment"># stage 2</span>
stage_2_output = stage_2(
image=stage_1_output,
original_image=original_image,
mask_image=mask_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
generator=generator,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
).images
<span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save(&quot;./if_stage_II.png&quot;)</span>
<span class="hljs-comment"># stage 3</span>
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=<span class="hljs-number">100</span>).images
<span class="hljs-comment">#stage_3_output[0].save(&quot;./if_stage_III.png&quot;)</span>
make_image_grid([original_image, mask_image, pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">5</span>)`,wrap:!1}}),Fe=new v({props:{title:"Converting between different pipelines",local:"converting-between-different-pipelines",headingTag:"h3"}}),ke=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTBBJTBBcGlwZV8xJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyKSUwQXBpcGVfMiUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiklMEElMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwSUZJbWcySW1nUGlwZWxpbmUlMkMlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZSUwQSUwQXBpcGVfMSUyMCUzRCUyMElGSW1nMkltZ1BpcGVsaW5lKCoqcGlwZV8xLmNvbXBvbmVudHMpJTBBcGlwZV8yJTIwJTNEJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUoKipwaXBlXzIuY29tcG9uZW50cyklMEElMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwSUZJbnBhaW50aW5nUGlwZWxpbmUlMkMlMjBJRklucGFpbnRpbmdTdXBlclJlc29sdXRpb25QaXBlbGluZSUwQSUwQXBpcGVfMSUyMCUzRCUyMElGSW5wYWludGluZ1BpcGVsaW5lKCoqcGlwZV8xLmNvbXBvbmVudHMpJTBBcGlwZV8yJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUoKipwaXBlXzIuY29tcG9uZW50cyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline
pipe_1 = IFPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>)
pipe_2 = IFSuperResolutionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>)
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline
pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline
pipe_1 = IFInpaintingPipeline(**pipe_1.components)
pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)`,wrap:!1}}),Ge=new v({props:{title:"Optimizing for speed",local:"optimizing-for-speed",headingTag:"h3"}}),Re=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIp",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)`,wrap:!1}}),Se=new J({props:{code:"cGlwZSglMjIlM0Nwcm9tcHQlM0UlMjIlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMzAp",highlighted:'pipe(<span class="hljs-string">&quot;&lt;prompt&gt;&quot;</span>, num_inference_steps=<span class="hljs-number">30</span>)',wrap:!1}}),Ne=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2Vycy5waXBlbGluZXMuZGVlcGZsb3lkX2lmJTIwaW1wb3J0JTIwZmFzdDI3X3RpbWVzdGVwcyUwQSUwQXBpcGUoJTIyJTNDcHJvbXB0JTNFJTIyJTJDJTIwdGltZXN0ZXBzJTNEZmFzdDI3X3RpbWVzdGVwcyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers.pipelines.deepfloyd_if <span class="hljs-keyword">import</span> fast27_timesteps
pipe(<span class="hljs-string">&quot;&lt;prompt&gt;&quot;</span>, timesteps=fast27_timesteps)`,wrap:!1}}),$e=new J({props:{code:"cGlwZSUyMCUzRCUyMElGSW1nMkltZ1BpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHQlM0QlMjIlM0Nwcm9tcHQlM0UlMjIlMkMlMjBzdHJlbmd0aCUzRDAuMykuaW1hZ2Vz",highlighted:`pipe = IFImg2ImgPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
image = pipe(image=image, prompt=<span class="hljs-string">&quot;&lt;prompt&gt;&quot;</span>, strength=<span class="hljs-number">0.3</span>).images`,wrap:!1}}),Pe=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFwaXBlLnRleHRfZW5jb2RlciUyMCUzRCUyMHRvcmNoLmNvbXBpbGUocGlwZS50ZXh0X2VuY29kZXIlMkMlMjBtb2RlJTNEJTIycmVkdWNlLW92ZXJoZWFkJTIyJTJDJTIwZnVsbGdyYXBoJTNEVHJ1ZSklMEFwaXBlLnVuZXQlMjAlM0QlMjB0b3JjaC5jb21waWxlKHBpcGUudW5ldCUyQyUyMG1vZGUlM0QlMjJyZWR1Y2Utb3ZlcmhlYWQlMjIlMkMlMjBmdWxsZ3JhcGglM0RUcnVlKQ==",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
<span class="hljs-keyword">import</span> torch
pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
pipe.text_encoder = torch.<span class="hljs-built_in">compile</span>(pipe.text_encoder, mode=<span class="hljs-string">&quot;reduce-overhead&quot;</span>, fullgraph=<span class="hljs-literal">True</span>)
pipe.unet = torch.<span class="hljs-built_in">compile</span>(pipe.unet, mode=<span class="hljs-string">&quot;reduce-overhead&quot;</span>, fullgraph=<span class="hljs-literal">True</span>)`,wrap:!1}}),He=new v({props:{title:"Optimizing for memory",local:"optimizing-for-memory",headingTag:"h3"}}),Le=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgp",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()`,wrap:!1}}),Ae=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9zZXF1ZW50aWFsX2NwdV9vZmZsb2FkKCk=",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16)
pipe.enable_sequential_cpu_offload()`,wrap:!1}}),Oe=new J({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMFQ1RW5jb2Rlck1vZGVsJTBBJTBBdGV4dF9lbmNvZGVyJTIwJTNEJTIwVDVFbmNvZGVyTW9kZWwuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHN1YmZvbGRlciUzRCUyMnRleHRfZW5jb2RlciUyMiUyQyUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTJDJTIwbG9hZF9pbl84Yml0JTNEVHJ1ZSUyQyUyMHZhcmlhbnQlM0QlMjI4Yml0JTIyJTBBKSUwQSUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBEaWZmdXNpb25QaXBlbGluZSUwQSUwQXBpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNEdGV4dF9lbmNvZGVyJTJDJTIwJTIwJTIzJTIwcGFzcyUyMHRoZSUyMHByZXZpb3VzbHklMjBpbnN0YW50aWF0ZWQlMjA4Yml0JTIwdGV4dCUyMGVuY29kZXIlMEElMjAlMjAlMjAlMjB1bmV0JTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTJDJTBBKSUwQSUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29kZV9wcm9tcHQoJTIyJTNDcHJvbXB0JTNFJTIyKQ==",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel
text_encoder = T5EncoderModel.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, subfolder=<span class="hljs-string">&quot;text_encoder&quot;</span>, device_map=<span class="hljs-string">&quot;auto&quot;</span>, load_in_8bit=<span class="hljs-literal">True</span>, variant=<span class="hljs-string">&quot;8bit&quot;</span>
)
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>,
text_encoder=text_encoder, <span class="hljs-comment"># pass the previously instantiated 8bit text encoder</span>
unet=<span class="hljs-literal">None</span>,
device_map=<span class="hljs-string">&quot;auto&quot;</span>,
)
prompt_embeds, negative_embeds = pipe.encode_prompt(<span class="hljs-string">&quot;&lt;prompt&gt;&quot;</span>)`,wrap:!1}}),tt=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEFpbXBvcnQlMjBnYyUwQWZyb20lMjB0cmFuc2Zvcm1lcnMlMjBpbXBvcnQlMjBUNUVuY29kZXJNb2RlbCUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBtYWtlX2ltYWdlX2dyaWQlMEElMEF0ZXh0X2VuY29kZXIlMjAlM0QlMjBUNUVuY29kZXJNb2RlbC5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwc3ViZm9sZGVyJTNEJTIydGV4dF9lbmNvZGVyJTIyJTJDJTIwZGV2aWNlX21hcCUzRCUyMmF1dG8lMjIlMkMlMjBsb2FkX2luXzhiaXQlM0RUcnVlJTJDJTIwdmFyaWFudCUzRCUyMjhiaXQlMjIlMEEpJTBBJTBBJTIzJTIwdGV4dCUyMHRvJTIwaW1hZ2UlMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHRleHRfZW5jb2RlciUzRHRleHRfZW5jb2RlciUyQyUyMCUyMCUyMyUyMHBhc3MlMjB0aGUlMjBwcmV2aW91c2x5JTIwaW5zdGFudGlhdGVkJTIwOGJpdCUyMHRleHQlMjBlbmNvZGVyJTBBJTIwJTIwJTIwJTIwdW5ldCUzRE5vbmUlMkMlMEElMjAlMjAlMjAlMjBkZXZpY2VfbWFwJTNEJTIyYXV0byUyMiUyQyUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAnYSUyMHBob3RvJTIwb2YlMjBhJTIwa2FuZ2Fyb28lMjB3ZWFyaW5nJTIwYW4lMjBvcmFuZ2UlMjBob29kaWUlMjBhbmQlMjBibHVlJTIwc3VuZ2xhc3NlcyUyMHN0YW5kaW5nJTIwaW4lMjBmcm9udCUyMG9mJTIwdGhlJTIwZWlmZmVsJTIwdG93ZXIlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCUyMnZlcnklMjBkZWVwJTIwbGVhcm5pbmclMjInJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHBpcGUuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwUmVtb3ZlJTIwdGhlJTIwcGlwZWxpbmUlMjBzbyUyMHdlJTIwY2FuJTIwcmUtbG9hZCUyMHRoZSUyMHBpcGVsaW5lJTIwd2l0aCUyMHRoZSUyMHVuZXQlMEFkZWwlMjB0ZXh0X2VuY29kZXIlMEFkZWwlMjBwaXBlJTBBZ2MuY29sbGVjdCgpJTBBdG9yY2guY3VkYS5lbXB0eV9jYWNoZSgpJTBBJTBBcGlwZSUyMCUzRCUyMElGUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0
eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTBBKSUwQSUwQWdlbmVyYXRvciUyMCUzRCUyMHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDApJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEEpLmltYWdlcyUwQSUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMFJlbW92ZSUyMHRoZSUyMHBpcGVsaW5lJTIwc28lMjB3ZSUyMGNhbiUyMGxvYWQlMjB0aGUlMjBzdXBlci1yZXNvbHV0aW9uJTIwcGlwZWxpbmUlMEFkZWwlMjBwaXBlJTBBZ2MuY29sbGVjdCgpJTBBdG9yY2guY3VkYS5lbXB0eV9jYWNoZSgpJTBBJTBBJTIzJTIwRmlyc3QlMjBzdXBlciUyMHJlc29sdXRpb24lMEElMEFwaXBlJTIwJTNEJTIwSUZTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTIwZGV2aWNlX21hcCUzRCUyMmF1dG8lMjIlMEEpJTBBJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoMCklMEFzdGFnZV8yX291dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RzdGFnZV8xX291dHB1dCUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEEpLmltYWdlcyUwQSUwQSUyM3B0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0lJLnBuZyUyMiklMEFtYWtlX2ltYWdlX2dyaWQoJTVCcHRfdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQlMkMlMjBwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RCU1RCUyQyUyMHJvd3MlM0QxJTJDJTIwcm93cyUzRDIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline
<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">import</span> gc
<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, make_image_grid
text_encoder = T5EncoderModel.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, subfolder=<span class="hljs-string">&quot;text_encoder&quot;</span>, device_map=<span class="hljs-string">&quot;auto&quot;</span>, load_in_8bit=<span class="hljs-literal">True</span>, variant=<span class="hljs-string">&quot;8bit&quot;</span>
)
<span class="hljs-comment"># text to image</span>
pipe = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>,
text_encoder=text_encoder, <span class="hljs-comment"># pass the previously instantiated 8bit text encoder</span>
unet=<span class="hljs-literal">None</span>,
device_map=<span class="hljs-string">&quot;auto&quot;</span>,
)
prompt = <span class="hljs-string">&#x27;a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says &quot;very deep learning&quot;&#x27;</span>
prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
<span class="hljs-comment"># Remove the pipeline so we can re-load the pipeline with the unet</span>
<span class="hljs-keyword">del</span> text_encoder
<span class="hljs-keyword">del</span> pipe
gc.collect()
torch.cuda.empty_cache()
pipe = IFPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16, device_map=<span class="hljs-string">&quot;auto&quot;</span>
)
generator = torch.Generator().manual_seed(<span class="hljs-number">0</span>)
stage_1_output = pipe(
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
generator=generator,
).images
<span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save(&quot;./if_stage_I.png&quot;)</span>
<span class="hljs-comment"># Remove the pipeline so we can load the super-resolution pipeline</span>
<span class="hljs-keyword">del</span> pipe
gc.collect()
torch.cuda.empty_cache()
<span class="hljs-comment"># First super resolution</span>
pipe = IFSuperResolutionPipeline.from_pretrained(
<span class="hljs-string">&quot;DeepFloyd/IF-II-L-v1.0&quot;</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">&quot;fp16&quot;</span>, torch_dtype=torch.float16, device_map=<span class="hljs-string">&quot;auto&quot;</span>
)
generator = torch.Generator().manual_seed(<span class="hljs-number">0</span>)
stage_2_output = pipe(
image=stage_1_output,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
output_type=<span class="hljs-string">&quot;pt&quot;</span>,
generator=generator,
).images
<span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save(&quot;./if_stage_II.png&quot;)</span>
make_image_grid([pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">2</span>)`,wrap:!1}}),nt=new v({props:{title:"Available Pipelines:",local:"available-pipelines",headingTag:"h2"}}),at=new v({props:{title:"IFPipeline",local:"diffusers.IFPipeline",headingTag:"h2"}}),lt=new Z({props:{name:"class diffusers.IFPipeline",anchor:"diffusers.IFPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L96"}}),ot=new Z({props:{name:"__call__",anchor:"diffusers.IFPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 100"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": 
typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) &#x2014;
The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.IFPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) &#x2014;
The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.IFPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"},{anchor:"diffusers.IFPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L547",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated images, and the second element is a list of <code>bool</code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the <code>safety_checker</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),P=new At({props:{anchor:"diffusers.IFPipeline.__call__.example",$$slots:{default:[Ha]},$$scope:{ctx:j}}}),it=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L168"}}),pt=new v({props:{title:"IFSuperResolutionPipeline",local:"diffusers.IFSuperResolutionPipeline",headingTag:"h2"}}),rt=new Z({props:{name:"class diffusers.IFSuperResolutionPipeline",anchor:"diffusers.IFSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L82"}}),ct=new Z({props:{name:"__call__",anchor:"diffusers.IFSuperResolutionPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"height",val:": int = None"},{name:"width",val:": int = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = 
None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 250"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014;
The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014;
The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>) &#x2014;
The image to be upscaled.`,name:"image"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>, defaults to None) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generated image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 250) &#x2014;
The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L614",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated images, and the second element is a list of <code>bool</code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the <code>safety_checker</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),z=new At({props:{anchor:"diffusers.IFSuperResolutionPipeline.__call__.example",$$slots:{default:[za]},$$scope:{ctx:j}}}),dt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L302"}}),mt=new v({props:{title:"IFImg2ImgPipeline",local:"diffusers.IFImg2ImgPipeline",headingTag:"h2"}}),gt=new Z({props:{name:"class diffusers.IFImg2ImgPipeline",anchor:"diffusers.IFImg2ImgPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L120"}}),ut=new Z({props:{name:"__call__",anchor:"diffusers.IFImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.7"},{name:"num_inference_steps",val:": int = 80"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 10.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": 
typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the
process.`,name:"image"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.7) &#x2014;
Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code>
will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of
denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
<code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 80) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 10.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generated image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L661",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated images, and the second element is a list of <code>bool</code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the <code>safety_checker</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),L=new At({props:{anchor:"diffusers.IFImg2ImgPipeline.__call__.example",$$slots:{default:[qa]},$$scope:{ctx:j}}}),ft=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L192"}}),ht=new v({props:{title:"IFImg2ImgSuperResolutionPipeline",local:"diffusers.IFImg2ImgSuperResolutionPipeline",headingTag:"h2"}}),_t=new Z({props:{name:"class diffusers.IFImg2ImgSuperResolutionPipeline",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L124"}}),yt=new Z({props:{name:"__call__",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]"},{name:"original_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.8"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = 
None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 250"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the
process.`,name:"image"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.original_image",description:`<strong>original_image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
The original image that <code>image</code> was varied from.`,name:"original_image"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) &#x2014;
Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code>
will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of
denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
<code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 250) &#x2014;
The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L744",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),A=new At({props:{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.example",$$slots:{default:[La]},$$scope:{ctx:j}}}),Mt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L344"}}),bt=new v({props:{title:"IFInpaintingPipeline",local:"diffusers.IFInpaintingPipeline",headingTag:"h2"}}),It=new Z({props:{name:"class diffusers.IFInpaintingPipeline",anchor:"diffusers.IFInpaintingPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L123"}}),wt=new Z({props:{name:"__call__",anchor:"diffusers.IFInpaintingPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 1.0"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": 
typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFInpaintingPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the
process.`,name:"image"},{anchor:"diffusers.IFInpaintingPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, to mask <code>image</code>. White pixels in the mask will be
repainted, while black pixels will be preserved. If <code>mask_image</code> is a PIL image, it will be converted
to a single channel (luminance) before use. If it&#x2019;s a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be <code>(B, H, W, 1)</code>.`,name:"mask_image"},{anchor:"diffusers.IFInpaintingPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014;
Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code>
will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of
denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
<code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFInpaintingPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFInpaintingPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFInpaintingPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFInpaintingPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFInpaintingPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFInpaintingPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFInpaintingPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"},{anchor:"diffusers.IFInpaintingPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L753",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),O=new At({props:{anchor:"diffusers.IFInpaintingPipeline.__call__.example",$$slots:{default:[Da]},$$scope:{ctx:j}}}),Jt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFInpaintingPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L195"}}),Ut=new v({props:{title:"IFInpaintingSuperResolutionPipeline",local:"diffusers.IFInpaintingSuperResolutionPipeline",headingTag:"h2"}}),Tt=new Z({props:{name:"class diffusers.IFInpaintingSuperResolutionPipeline",anchor:"diffusers.IFInpaintingSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L126"}}),Zt=new Z({props:{name:"__call__",anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]"},{name:"original_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.8"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 
100"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 0"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the
process.`,name:"image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.original_image",description:`<strong>original_image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) &#x2014;
The original image that <code>image</code> was varied from.`,name:"original_image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, to mask <code>image</code>. White pixels in the mask will be
repainted, while black pixels will be preserved. If <code>mask_image</code> is a PIL image, it will be converted
to a single channel (luminance) before use. If it&#x2019;s a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be <code>(B, H, W, 1)</code>.`,name:"mask_image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) &#x2014;
Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code>
will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of
denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
<code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code>
timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12262/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014;
The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L832",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p>
`}}),te=new At({props:{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.example",$$slots:{default:[Aa]},$$scope:{ctx:j}}}),vt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead.
Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12262/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L346"}}),jt=new Pa({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/deepfloyd_if.md"}}),{c(){f=i("meta"),w=a(),b=i("p"),_=a(),r(M.$$.fragment),o=a(),I=i("div"),I.innerHTML=As,en=a(),r(se.$$.fragment),tn=a(),ae=i("p"),ae.textContent=Ks,nn=a(),le=i("ul"),le.innerHTML=Os,sn=a(),r(oe.$$.fragment),an=a(),ie=i("p"),ie.textContent=ea,ln=a(),pe=i("ol"),pe.innerHTML=ta,on=a(),r(re.$$.fragment),pn=a(),ce=i("p"),ce.textContent=na,rn=a(),r(de.$$.fragment),cn=a(),me=i("p"),me.innerHTML=sa,dn=a(),ge=i("p"),ge.innerHTML=aa,mn=a(),r(ue.$$.fragment),gn=a(),fe=i("p"),fe.textContent=la,un=a(),he=i("ul"),he.innerHTML=oa,fn=a(),_e=i("p"),_e.innerHTML=ia,hn=a(),ye=i("ul"),ye.innerHTML=pa,_n=a(),Me=i("p"),Me.innerHTML=ra,yn=a(),r(be.$$.fragment),Mn=a(),Ie=i("p"),Ie.innerHTML=ca,bn=a(),r(we.$$.fragment),In=a(),r(Je.$$.fragment),wn=a(),Ue=i("p"),Ue.innerHTML=da,Jn=a(),Te=i("p"),Te.innerHTML=ma,Un=a(),r(Ze.$$.fragment),Tn=a(),r(ve.$$.fragment),Zn=a(),je=i("p"),je.innerHTML=ga,vn=a(),We=i("p"),We.innerHTML=ua,jn=a(),r(Be.$$.fragment),Wn=a(),r(Fe.$$.fragment),Bn=a(),Xe=i("p"),Xe.innerHTML=fa,Fn=a(),r(ke.$$.fragment),Xn=a(),r(Ge.$$.fragment),kn=a(),Ce=i("p"),Ce.textContent=ha,Gn=a(),r(Re.$$.fragment),Cn=a(),Ve=i("p"),Ve.textContent=_a,Rn=a(),xe=i("p"),xe.innerHTML=ya,Vn=a(),r(Se.$$.fragment),xn=a(),Ee=i("p"),Ee.innerHTML=Ma,Sn=a(),r(Ne.$$.fragment),En=a(),Qe=i("p"),Qe.textContent=ba,Nn=a(),r($e.$$.fragment),Qn=a(),Ye=i("p"),Ye.innerHTML=Ia,$n=a(),r(Pe.$$.fragment),Yn=a(),r(He.$$.fragment),Pn=a(),ze=i("p"),ze.textContent=wa,Hn=a(),qe=i("p"),qe.textContent=Ja,zn=a(),r(Le.$$.fragment),qn=a(),De=i("p"),De.textContent=Ua,Ln=a(),r(Ae.$$.fragment),Dn=a(),Ke=i("p"),Ke.textContent=Ta,An=a(),r(Oe.$
$.fragment),Kn=a(),et=i("p"),et.textContent=Za,On=a(),r(tt.$$.fragment),es=a(),r(nt.$$.fragment),ts=a(),st=i("table"),st.innerHTML=va,ns=a(),r(at.$$.fragment),ss=a(),W=i("div"),r(lt.$$.fragment),Is=a(),C=i("div"),r(ot.$$.fragment),ws=a(),Ft=i("p"),Ft.textContent=ja,Js=a(),r(P.$$.fragment),Us=a(),H=i("div"),r(it.$$.fragment),Ts=a(),Xt=i("p"),Xt.textContent=Wa,as=a(),r(pt.$$.fragment),ls=a(),B=i("div"),r(rt.$$.fragment),Zs=a(),R=i("div"),r(ct.$$.fragment),vs=a(),kt=i("p"),kt.textContent=Ba,js=a(),r(z.$$.fragment),Ws=a(),q=i("div"),r(dt.$$.fragment),Bs=a(),Gt=i("p"),Gt.textContent=Fa,os=a(),r(mt.$$.fragment),is=a(),F=i("div"),r(gt.$$.fragment),Fs=a(),V=i("div"),r(ut.$$.fragment),Xs=a(),Ct=i("p"),Ct.textContent=Xa,ks=a(),r(L.$$.fragment),Gs=a(),D=i("div"),r(ft.$$.fragment),Cs=a(),Rt=i("p"),Rt.textContent=ka,ps=a(),r(ht.$$.fragment),rs=a(),X=i("div"),r(_t.$$.fragment),Rs=a(),x=i("div"),r(yt.$$.fragment),Vs=a(),Vt=i("p"),Vt.textContent=Ga,xs=a(),r(A.$$.fragment),Ss=a(),K=i("div"),r(Mt.$$.fragment),Es=a(),xt=i("p"),xt.textContent=Ca,cs=a(),r(bt.$$.fragment),ds=a(),k=i("div"),r(It.$$.fragment),Ns=a(),S=i("div"),r(wt.$$.fragment),Qs=a(),St=i("p"),St.textContent=Ra,$s=a(),r(O.$$.fragment),Ys=a(),ee=i("div"),r(Jt.$$.fragment),Ps=a(),Et=i("p"),Et.textContent=Va,ms=a(),r(Ut.$$.fragment),gs=a(),G=i("div"),r(Tt.$$.fragment),Hs=a(),E=i("div"),r(Zt.$$.fragment),zs=a(),Nt=i("p"),Nt.textContent=xa,qs=a(),r(te.$$.fragment),Ls=a(),ne=i("div"),r(vt.$$.fragment),Ds=a(),Qt=i("p"),Qt.textContent=Sa,us=a(),r(jt.$$.fragment),fs=a(),Ot=i("p"),this.h()},l(e){const 
t=Ya("svelte-u9bgzb",document.head);f=p(t,"META",{name:!0,content:!0}),t.forEach(n),w=l(e),b=p(e,"P",{}),T(b).forEach(n),_=l(e),c(M.$$.fragment,e),o=l(e),I=p(e,"DIV",{class:!0,"data-svelte-h":!0}),y(I)!=="svelte-1elo7hh"&&(I.innerHTML=As),en=l(e),c(se.$$.fragment,e),tn=l(e),ae=p(e,"P",{"data-svelte-h":!0}),y(ae)!=="svelte-1234a12"&&(ae.textContent=Ks),nn=l(e),le=p(e,"UL",{"data-svelte-h":!0}),y(le)!=="svelte-1h0q5fq"&&(le.innerHTML=Os),sn=l(e),c(oe.$$.fragment,e),an=l(e),ie=p(e,"P",{"data-svelte-h":!0}),y(ie)!=="svelte-13gixbp"&&(ie.textContent=ea),ln=l(e),pe=p(e,"OL",{"data-svelte-h":!0}),y(pe)!=="svelte-fe0b7x"&&(pe.innerHTML=ta),on=l(e),c(re.$$.fragment,e),pn=l(e),ce=p(e,"P",{"data-svelte-h":!0}),y(ce)!=="svelte-1eqisl"&&(ce.textContent=na),rn=l(e),c(de.$$.fragment,e),cn=l(e),me=p(e,"P",{"data-svelte-h":!0}),y(me)!=="svelte-1vhgvls"&&(me.innerHTML=sa),dn=l(e),ge=p(e,"P",{"data-svelte-h":!0}),y(ge)!=="svelte-13wvpdg"&&(ge.innerHTML=aa),mn=l(e),c(ue.$$.fragment,e),gn=l(e),fe=p(e,"P",{"data-svelte-h":!0}),y(fe)!=="svelte-12efsp0"&&(fe.textContent=la),un=l(e),he=p(e,"UL",{"data-svelte-h":!0}),y(he)!=="svelte-1j0zpko"&&(he.innerHTML=oa),fn=l(e),_e=p(e,"P",{"data-svelte-h":!0}),y(_e)!=="svelte-6cosrd"&&(_e.innerHTML=ia),hn=l(e),ye=p(e,"UL",{"data-svelte-h":!0}),y(ye)!=="svelte-1bclroz"&&(ye.innerHTML=pa),_n=l(e),Me=p(e,"P",{"data-svelte-h":!0}),y(Me)!=="svelte-10fuv2z"&&(Me.innerHTML=ra),yn=l(e),c(be.$$.fragment,e),Mn=l(e),Ie=p(e,"P",{"data-svelte-h":!0}),y(Ie)!=="svelte-1yeiafg"&&(Ie.innerHTML=ca),bn=l(e),c(we.$$.fragment,e),In=l(e),c(Je.$$.fragment,e),wn=l(e),Ue=p(e,"P",{"data-svelte-h":!0}),y(Ue)!=="svelte-1rk6rku"&&(Ue.innerHTML=da),Jn=l(e),Te=p(e,"P",{"data-svelte-h":!0}),y(Te)!=="svelte-r7lwna"&&(Te.innerHTML=ma),Un=l(e),c(Ze.$$.fragment,e),Tn=l(e),c(ve.$$.fragment,e),Zn=l(e),je=p(e,"P",{"data-svelte-h":!0}),y(je)!=="svelte-1yi033a"&&(je.innerHTML=ga),vn=l(e),We=p(e,"P",{"data-svelte-h":!0}),y(We)!=="svelte-8txrxq"&&(We.innerHTML=ua),jn=l(e),c(Be.$$.fragment,e),W
n=l(e),c(Fe.$$.fragment,e),Bn=l(e),Xe=p(e,"P",{"data-svelte-h":!0}),y(Xe)!=="svelte-yqwmip"&&(Xe.innerHTML=fa),Fn=l(e),c(ke.$$.fragment,e),Xn=l(e),c(Ge.$$.fragment,e),kn=l(e),Ce=p(e,"P",{"data-svelte-h":!0}),y(Ce)!=="svelte-15gngr3"&&(Ce.textContent=ha),Gn=l(e),c(Re.$$.fragment,e),Cn=l(e),Ve=p(e,"P",{"data-svelte-h":!0}),y(Ve)!=="svelte-12zfq1b"&&(Ve.textContent=_a),Rn=l(e),xe=p(e,"P",{"data-svelte-h":!0}),y(xe)!=="svelte-1w6wjdd"&&(xe.innerHTML=ya),Vn=l(e),c(Se.$$.fragment,e),xn=l(e),Ee=p(e,"P",{"data-svelte-h":!0}),y(Ee)!=="svelte-1t30w48"&&(Ee.innerHTML=Ma),Sn=l(e),c(Ne.$$.fragment,e),En=l(e),Qe=p(e,"P",{"data-svelte-h":!0}),y(Qe)!=="svelte-4yc4g0"&&(Qe.textContent=ba),Nn=l(e),c($e.$$.fragment,e),Qn=l(e),Ye=p(e,"P",{"data-svelte-h":!0}),y(Ye)!=="svelte-cpkwz5"&&(Ye.innerHTML=Ia),$n=l(e),c(Pe.$$.fragment,e),Yn=l(e),c(He.$$.fragment,e),Pn=l(e),ze=p(e,"P",{"data-svelte-h":!0}),y(ze)!=="svelte-klajpi"&&(ze.textContent=wa),Hn=l(e),qe=p(e,"P",{"data-svelte-h":!0}),y(qe)!=="svelte-6d4mux"&&(qe.textContent=Ja),zn=l(e),c(Le.$$.fragment,e),qn=l(e),De=p(e,"P",{"data-svelte-h":!0}),y(De)!=="svelte-mhfn46"&&(De.textContent=Ua),Ln=l(e),c(Ae.$$.fragment,e),Dn=l(e),Ke=p(e,"P",{"data-svelte-h":!0}),y(Ke)!=="svelte-b2fswh"&&(Ke.textContent=Ta),An=l(e),c(Oe.$$.fragment,e),Kn=l(e),et=p(e,"P",{"data-svelte-h":!0}),y(et)!=="svelte-11csze6"&&(et.textContent=Za),On=l(e),c(tt.$$.fragment,e),es=l(e),c(nt.$$.fragment,e),ts=l(e),st=p(e,"TABLE",{"data-svelte-h":!0}),y(st)!=="svelte-5xvyn6"&&(st.innerHTML=va),ns=l(e),c(at.$$.fragment,e),ss=l(e),W=p(e,"DIV",{class:!0});var N=T(W);c(lt.$$.fragment,N),Is=l(N),C=p(N,"DIV",{class:!0});var Q=T(C);c(ot.$$.fragment,Q),ws=l(Q),Ft=p(Q,"P",{"data-svelte-h":!0}),y(Ft)!=="svelte-v78lg8"&&(Ft.textContent=ja),Js=l(Q),c(P.$$.fragment,Q),Q.forEach(n),Us=l(N),H=p(N,"DIV",{class:!0});var 
Wt=T(H);c(it.$$.fragment,Wt),Ts=l(Wt),Xt=p(Wt,"P",{"data-svelte-h":!0}),y(Xt)!=="svelte-16q0ax1"&&(Xt.textContent=Wa),Wt.forEach(n),N.forEach(n),as=l(e),c(pt.$$.fragment,e),ls=l(e),B=p(e,"DIV",{class:!0});var $=T(B);c(rt.$$.fragment,$),Zs=l($),R=p($,"DIV",{class:!0});var Y=T(R);c(ct.$$.fragment,Y),vs=l(Y),kt=p(Y,"P",{"data-svelte-h":!0}),y(kt)!=="svelte-v78lg8"&&(kt.textContent=Ba),js=l(Y),c(z.$$.fragment,Y),Y.forEach(n),Ws=l($),q=p($,"DIV",{class:!0});var Bt=T(q);c(dt.$$.fragment,Bt),Bs=l(Bt),Gt=p(Bt,"P",{"data-svelte-h":!0}),y(Gt)!=="svelte-16q0ax1"&&(Gt.textContent=Fa),Bt.forEach(n),$.forEach(n),os=l(e),c(mt.$$.fragment,e),is=l(e),F=p(e,"DIV",{class:!0});var $t=T(F);c(gt.$$.fragment,$t),Fs=l($t),V=p($t,"DIV",{class:!0});var Yt=T(V);c(ut.$$.fragment,Yt),Xs=l(Yt),Ct=p(Yt,"P",{"data-svelte-h":!0}),y(Ct)!=="svelte-v78lg8"&&(Ct.textContent=Xa),ks=l(Yt),c(L.$$.fragment,Yt),Yt.forEach(n),Gs=l($t),D=p($t,"DIV",{class:!0});var _s=T(D);c(ft.$$.fragment,_s),Cs=l(_s),Rt=p(_s,"P",{"data-svelte-h":!0}),y(Rt)!=="svelte-16q0ax1"&&(Rt.textContent=ka),_s.forEach(n),$t.forEach(n),ps=l(e),c(ht.$$.fragment,e),rs=l(e),X=p(e,"DIV",{class:!0});var Pt=T(X);c(_t.$$.fragment,Pt),Rs=l(Pt),x=p(Pt,"DIV",{class:!0});var Ht=T(x);c(yt.$$.fragment,Ht),Vs=l(Ht),Vt=p(Ht,"P",{"data-svelte-h":!0}),y(Vt)!=="svelte-v78lg8"&&(Vt.textContent=Ga),xs=l(Ht),c(A.$$.fragment,Ht),Ht.forEach(n),Ss=l(Pt),K=p(Pt,"DIV",{class:!0});var ys=T(K);c(Mt.$$.fragment,ys),Es=l(ys),xt=p(ys,"P",{"data-svelte-h":!0}),y(xt)!=="svelte-16q0ax1"&&(xt.textContent=Ca),ys.forEach(n),Pt.forEach(n),cs=l(e),c(bt.$$.fragment,e),ds=l(e),k=p(e,"DIV",{class:!0});var zt=T(k);c(It.$$.fragment,zt),Ns=l(zt),S=p(zt,"DIV",{class:!0});var qt=T(S);c(wt.$$.fragment,qt),Qs=l(qt),St=p(qt,"P",{"data-svelte-h":!0}),y(St)!=="svelte-v78lg8"&&(St.textContent=Ra),$s=l(qt),c(O.$$.fragment,qt),qt.forEach(n),Ys=l(zt),ee=p(zt,"DIV",{class:!0});var 
Ms=T(ee);c(Jt.$$.fragment,Ms),Ps=l(Ms),Et=p(Ms,"P",{"data-svelte-h":!0}),y(Et)!=="svelte-16q0ax1"&&(Et.textContent=Va),Ms.forEach(n),zt.forEach(n),ms=l(e),c(Ut.$$.fragment,e),gs=l(e),G=p(e,"DIV",{class:!0});var Lt=T(G);c(Tt.$$.fragment,Lt),Hs=l(Lt),E=p(Lt,"DIV",{class:!0});var Dt=T(E);c(Zt.$$.fragment,Dt),zs=l(Dt),Nt=p(Dt,"P",{"data-svelte-h":!0}),y(Nt)!=="svelte-v78lg8"&&(Nt.textContent=xa),qs=l(Dt),c(te.$$.fragment,Dt),Dt.forEach(n),Ls=l(Lt),ne=p(Lt,"DIV",{class:!0});var bs=T(ne);c(vt.$$.fragment,bs),Ds=l(bs),Qt=p(bs,"P",{"data-svelte-h":!0}),y(Qt)!=="svelte-16q0ax1"&&(Qt.textContent=Sa),bs.forEach(n),Lt.forEach(n),us=l(e),c(jt.$$.fragment,e),fs=l(e),Ot=p(e,"P",{}),T(Ot).forEach(n),this.h()},h(){U(f,"name","hf:doc:metadata"),U(f,"content",Oa),U(I,"class","flex flex-wrap space-x-1"),U(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),U(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){h(document.head,f),s(e,w,t),s(e,b,t),s(e,_,t),d(M,e,t),s(e,o,t),s(e,I,t),s(e,en,t),d(se,e,t),s(e,tn,t),s(e,ae,t),s(e,nn,t),s(e,le,t),s(e,sn,t),d(oe,e,t),s(e,an,t),s(e,ie,t),s(e,ln,t),s(e,pe,t),s(e,on,t),d(re,e,t),s(e,pn,t),s(e,ce,t),s(e,rn,t),d(de,e,t),s(e,cn,t),s(e,me,t),s(e,dn,t),s(e,ge,t),s(e,mn,t),d(ue,e,t),s(e,gn,t),s(e,fe,t),s(e,un,t),s(e,he,t),s(e,fn,t),s(e,_e,t),s(e,hn,t),s(e,ye,t),s(e,_n,t),s(e,Me,t),s(e,yn,t),d(be,e,t),s(e,Mn,t),s(e,Ie,t),s(e,bn,t),d(we,e,t),s(e,In,t),d(Je,e,t),s(e,wn,t),s(e,Ue,t),s(e,Jn,t),s(e,Te,t),s(e,Un,t),d(Ze,e,t),s(e,Tn,t),d(ve,e,t),s(e,Zn,t),s(e,je,t),s(e,vn,t),s(e,We,t),s(e,jn,t),d(Be,e,t),s(e,Wn,t),d(Fe,e,t),s(e,Bn,t),s(e,Xe,t),s(e,Fn,t),d(ke,e,t),s(e,Xn,t),d(Ge,e,t),s(e,kn,t),s(e,Ce,t),s(e,Gn,t),d(Re,e,t),s(e,Cn,t),s(e,Ve,t),s(e,Rn,t),s(e,xe,t),s(e,Vn,t),d(Se,e,t),s(e,xn,t),s(e,Ee,t),s(e,Sn,t),d(Ne,e,t),s(e,En,t),s(e,Qe,t),s(e,Nn,t),d($e,e,t),s(e,Qn,t),s(e,Ye,t),s(e,$n,t),d(Pe,e,t),s(e,Yn,t),d(He,e,t),s(e,Pn,t),s(e,ze,t),s(e,Hn,t),s(e,qe,t),s(e,zn,t),d(Le,e,t),s(e,qn,t),s(e,De,t),s(e,Ln,t),d(Ae,e,t),s(e,Dn,t),s(e,Ke,t),s(e,An,t),d(Oe,e,t),s(e,Kn,t),s(e,et,t),s(e,On,t),d(tt,e,t),s(e,es,t),d(nt,e,t),s(e,ts,t),s(e,st,t),s(e,ns,t),d(at,e,t),s(e,ss,t),s(e,W,t),d(lt,W,null),h(W,Is),h(W,C),d(ot,C,null),h(C,ws),h(C,Ft),h(C,Js),d(P,C,null),h(W,Us),h(W,H),d(it,H,null),h(H,Ts),h(H,Xt),s(e,as,t),d(pt,e,t),s(e,ls,t),s(e,B,t),d(rt,B,null),h(B,
Zs),h(B,R),d(ct,R,null),h(R,vs),h(R,kt),h(R,js),d(z,R,null),h(B,Ws),h(B,q),d(dt,q,null),h(q,Bs),h(q,Gt),s(e,os,t),d(mt,e,t),s(e,is,t),s(e,F,t),d(gt,F,null),h(F,Fs),h(F,V),d(ut,V,null),h(V,Xs),h(V,Ct),h(V,ks),d(L,V,null),h(F,Gs),h(F,D),d(ft,D,null),h(D,Cs),h(D,Rt),s(e,ps,t),d(ht,e,t),s(e,rs,t),s(e,X,t),d(_t,X,null),h(X,Rs),h(X,x),d(yt,x,null),h(x,Vs),h(x,Vt),h(x,xs),d(A,x,null),h(X,Ss),h(X,K),d(Mt,K,null),h(K,Es),h(K,xt),s(e,cs,t),d(bt,e,t),s(e,ds,t),s(e,k,t),d(It,k,null),h(k,Ns),h(k,S),d(wt,S,null),h(S,Qs),h(S,St),h(S,$s),d(O,S,null),h(k,Ys),h(k,ee),d(Jt,ee,null),h(ee,Ps),h(ee,Et),s(e,ms,t),d(Ut,e,t),s(e,gs,t),s(e,G,t),d(Tt,G,null),h(G,Hs),h(G,E),d(Zt,E,null),h(E,zs),h(E,Nt),h(E,qs),d(te,E,null),h(G,Ls),h(G,ne),d(vt,ne,null),h(ne,Ds),h(ne,Qt),s(e,us,t),d(jt,e,t),s(e,fs,t),s(e,Ot,t),hs=!0},p(e,[t]){const N={};t&2&&(N.$$scope={dirty:t,ctx:e}),P.$set(N);const Q={};t&2&&(Q.$$scope={dirty:t,ctx:e}),z.$set(Q);const Wt={};t&2&&(Wt.$$scope={dirty:t,ctx:e}),L.$set(Wt);const $={};t&2&&($.$$scope={dirty:t,ctx:e}),A.$set($);const Y={};t&2&&(Y.$$scope={dirty:t,ctx:e}),O.$set(Y);const 
Bt={};t&2&&(Bt.$$scope={dirty:t,ctx:e}),te.$set(Bt)},i(e){hs||(m(M.$$.fragment,e),m(se.$$.fragment,e),m(oe.$$.fragment,e),m(re.$$.fragment,e),m(de.$$.fragment,e),m(ue.$$.fragment,e),m(be.$$.fragment,e),m(we.$$.fragment,e),m(Je.$$.fragment,e),m(Ze.$$.fragment,e),m(ve.$$.fragment,e),m(Be.$$.fragment,e),m(Fe.$$.fragment,e),m(ke.$$.fragment,e),m(Ge.$$.fragment,e),m(Re.$$.fragment,e),m(Se.$$.fragment,e),m(Ne.$$.fragment,e),m($e.$$.fragment,e),m(Pe.$$.fragment,e),m(He.$$.fragment,e),m(Le.$$.fragment,e),m(Ae.$$.fragment,e),m(Oe.$$.fragment,e),m(tt.$$.fragment,e),m(nt.$$.fragment,e),m(at.$$.fragment,e),m(lt.$$.fragment,e),m(ot.$$.fragment,e),m(P.$$.fragment,e),m(it.$$.fragment,e),m(pt.$$.fragment,e),m(rt.$$.fragment,e),m(ct.$$.fragment,e),m(z.$$.fragment,e),m(dt.$$.fragment,e),m(mt.$$.fragment,e),m(gt.$$.fragment,e),m(ut.$$.fragment,e),m(L.$$.fragment,e),m(ft.$$.fragment,e),m(ht.$$.fragment,e),m(_t.$$.fragment,e),m(yt.$$.fragment,e),m(A.$$.fragment,e),m(Mt.$$.fragment,e),m(bt.$$.fragment,e),m(It.$$.fragment,e),m(wt.$$.fragment,e),m(O.$$.fragment,e),m(Jt.$$.fragment,e),m(Ut.$$.fragment,e),m(Tt.$$.fragment,e),m(Zt.$$.fragment,e),m(te.$$.fragment,e),m(vt.$$.fragment,e),m(jt.$$.fragment,e),hs=!0)},o(e){g(M.$$.fragment,e),g(se.$$.fragment,e),g(oe.$$.fragment,e),g(re.$$.fragment,e),g(de.$$.fragment,e),g(ue.$$.fragment,e),g(be.$$.fragment,e),g(we.$$.fragment,e),g(Je.$$.fragment,e),g(Ze.$$.fragment,e),g(ve.$$.fragment,e),g(Be.$$.fragment,e),g(Fe.$$.fragment,e),g(ke.$$.fragment,e),g(Ge.$$.fragment,e),g(Re.$$.fragment,e),g(Se.$$.fragment,e),g(Ne.$$.fragment,e),g($e.$$.fragment,e),g(Pe.$$.fragment,e),g(He.$$.fragment,e),g(Le.$$.fragment,e),g(Ae.$$.fragment,e),g(Oe.$$.fragment,e),g(tt.$$.fragment,e),g(nt.$$.fragment,e),g(at.$$.fragment,e),g(lt.$$.fragment,e),g(ot.$$.fragment,e),g(P.$$.fragment,e),g(it.$$.fragment,e),g(pt.$$.fragment,e),g(rt.$$.fragment,e),g(ct.$$.fragment,e),g(z.$$.fragment,e),g(dt.$$.fragment,e),g(mt.$$.fragment,e),g(gt.$$.fragment,e),g(ut.$$.fragment,e),g(L.$$.fragme
nt,e),g(ft.$$.fragment,e),g(ht.$$.fragment,e),g(_t.$$.fragment,e),g(yt.$$.fragment,e),g(A.$$.fragment,e),g(Mt.$$.fragment,e),g(bt.$$.fragment,e),g(It.$$.fragment,e),g(wt.$$.fragment,e),g(O.$$.fragment,e),g(Jt.$$.fragment,e),g(Ut.$$.fragment,e),g(Tt.$$.fragment,e),g(Zt.$$.fragment,e),g(te.$$.fragment,e),g(vt.$$.fragment,e),g(jt.$$.fragment,e),hs=!1},d(e){e&&(n(w),n(b),n(_),n(o),n(I),n(en),n(tn),n(ae),n(nn),n(le),n(sn),n(an),n(ie),n(ln),n(pe),n(on),n(pn),n(ce),n(rn),n(cn),n(me),n(dn),n(ge),n(mn),n(gn),n(fe),n(un),n(he),n(fn),n(_e),n(hn),n(ye),n(_n),n(Me),n(yn),n(Mn),n(Ie),n(bn),n(In),n(wn),n(Ue),n(Jn),n(Te),n(Un),n(Tn),n(Zn),n(je),n(vn),n(We),n(jn),n(Wn),n(Bn),n(Xe),n(Fn),n(Xn),n(kn),n(Ce),n(Gn),n(Cn),n(Ve),n(Rn),n(xe),n(Vn),n(xn),n(Ee),n(Sn),n(En),n(Qe),n(Nn),n(Qn),n(Ye),n($n),n(Yn),n(Pn),n(ze),n(Hn),n(qe),n(zn),n(qn),n(De),n(Ln),n(Dn),n(Ke),n(An),n(Kn),n(et),n(On),n(es),n(ts),n(st),n(ns),n(ss),n(W),n(as),n(ls),n(B),n(os),n(is),n(F),n(ps),n(rs),n(X),n(cs),n(ds),n(k),n(ms),n(gs),n(G),n(us),n(fs),n(Ot)),n(f),u(M,e),u(se,e),u(oe,e),u(re,e),u(de,e),u(ue,e),u(be,e),u(we,e),u(Je,e),u(Ze,e),u(ve,e),u(Be,e),u(Fe,e),u(ke,e),u(Ge,e),u(Re,e),u(Se,e),u(Ne,e),u($e,e),u(Pe,e),u(He,e),u(Le,e),u(Ae,e),u(Oe,e),u(tt,e),u(nt,e),u(at,e),u(lt),u(ot),u(P),u(it),u(pt,e),u(rt),u(ct),u(z),u(dt),u(mt,e),u(gt),u(ut),u(L),u(ft),u(ht,e),u(_t),u(yt),u(A),u(Mt),u(bt,e),u(It),u(wt),u(O),u(Jt),u(Ut,e),u(Tt),u(Zt),u(te),u(vt),u(jt,e)}}}const Oa='{"title":"DeepFloyd IF","local":"deepfloyd-if","sections":[{"title":"Overview","local":"overview","sections":[],"depth":2},{"title":"Usage","local":"usage","sections":[{"title":"Text-to-Image Generation","local":"text-to-image-generation","sections":[],"depth":3},{"title":"Text Guided Image-to-Image Generation","local":"text-guided-image-to-image-generation","sections":[],"depth":3},{"title":"Text Guided Inpainting Generation","local":"text-guided-inpainting-generation","sections":[],"depth":3},{"title":"Converting between different 
pipelines","local":"converting-between-different-pipelines","sections":[],"depth":3},{"title":"Optimizing for speed","local":"optimizing-for-speed","sections":[],"depth":3},{"title":"Optimizing for memory","local":"optimizing-for-memory","sections":[],"depth":3}],"depth":2},{"title":"Available Pipelines:","local":"available-pipelines","sections":[],"depth":2},{"title":"IFPipeline","local":"diffusers.IFPipeline","sections":[],"depth":2},{"title":"IFSuperResolutionPipeline","local":"diffusers.IFSuperResolutionPipeline","sections":[],"depth":2},{"title":"IFImg2ImgPipeline","local":"diffusers.IFImg2ImgPipeline","sections":[],"depth":2},{"title":"IFImg2ImgSuperResolutionPipeline","local":"diffusers.IFImg2ImgSuperResolutionPipeline","sections":[],"depth":2},{"title":"IFInpaintingPipeline","local":"diffusers.IFInpaintingPipeline","sections":[],"depth":2},{"title":"IFInpaintingSuperResolutionPipeline","local":"diffusers.IFInpaintingSuperResolutionPipeline","sections":[],"depth":2}],"depth":1}';function el(j){return Na(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class il extends Qa{constructor(f){super(),$a(this,f,el,Ka,Ea,{})}}export{il as component};

Xet Storage Details

Size:
197 kB
·
Xet hash:
c0ac847dfde9eb2e01c70d2f6f9eefd95dcba6b45a6cb1bb508aba6dec77baec

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.