Buckets:
| import{s as $a,o as Qa,n as Ot}from"../chunks/scheduler.53228c21.js";import{S as Ya,i as Pa,e as i,s as a,c as r,h as Ha,a as p,d as n,b as l,f as T,g as c,j as y,k as U,l as h,m as s,n as d,t as m,o as g,p as u}from"../chunks/index.100fac89.js";import{C as za}from"../chunks/CopyLLMTxtMenu.969c168d.js";import{D as Z}from"../chunks/Docstring.8eea0d47.js";import{C as J}from"../chunks/CodeBlock.d30a6509.js";import{E as Kt}from"../chunks/ExampleCodeBlock.5e9b5749.js";import{H as v,E as qa}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.92f39b94.js";function La(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTJDJTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwcHRfdG9fcGlsJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCdhJTIwcGhvdG8lMjBvZiUyMGElMjBrYW5nYXJvbyUyMHdlYXJpbmclMjBhbiUyMG9yYW5nZSUyMGhvb2RpZSUyMGFuZCUyMGJsdWUlMjBzdW5nbGFzc2VzJTIwc3RhbmRpbmclMjBpbiUyMGZyb250JTIwb2YlMjB0aGUlMjBlaWZmZWwlMjB0b3dlciUyMGhvbGRpbmclMjBhJTIwc2lnbiUyMHRoYXQlMjBzYXlzJTIwJTIydmVyeSUyMGRlZXAlMjBsZWFybmluZyUyMiclMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMikuaW1hZ2VzJTBBJTBBJTIzJTIwc2F2ZSUyMGludGVybWVkaWF0ZSUyMGltYWdlJTBBcGlsX2ltYWdlJTIwJTNEJTIwcHRfdG9fcGlsKGltYWdlKSUwQXBpbF9pbWFnZSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSS5wbmclMjIpJTBBJTBBc3VwZXJfcmVzXzFfcGlwZSUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzYWZldHlfbW9kdWxlcyUyMCUzRCUyMCU3QiUwQSUyMCUyMCUyMCUyMCUyMmZlYXR1cmVfZXh0cmFjdG9yJTIyJTNBJTIwcGlwZS5mZWF0dXJlX2V4dHJhY3RvciUyQyUwQSUyMCUyMCUyMCUyMCUyMnNhZmV0eV9jaGVja2VyJTIyJTNBJTIwcGlwZS5zYWZldHlfY2hlY2tlciUyQyUwQSUyMCUyMCUyMCUyMCUyMndhdGVybWFya2VyJTIyJTNBJTIwcGlwZS53YXRlcm1hcmtlciUyQyUwQSU3RCUwQXN1cGVyX3Jlc18yX3BpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXg0LXVwc2NhbGVyJTIyJTJDJTIwKipzYWZldHlfbW9kdWxlcyUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMl9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMl9waXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span>pipe = IFPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">"pt"</span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">"pt"</span> | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>safety_modules = { | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"feature_extractor"</span>: pipe.feature_extractor, | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"safety_checker"</span>: pipe.safety_checker, | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"watermarker"</span>: pipe.watermarker, | |
| <span class="hljs-meta">... </span>} | |
| <span class="hljs-meta">>>> </span>super_res_2_pipe = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"stabilityai/stable-diffusion-x4-upscaler"</span>, **safety_modules, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_2_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_2_pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Da(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTJDJTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwcHRfdG9fcGlsJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCdhJTIwcGhvdG8lMjBvZiUyMGElMjBrYW5nYXJvbyUyMHdlYXJpbmclMjBhbiUyMG9yYW5nZSUyMGhvb2RpZSUyMGFuZCUyMGJsdWUlMjBzdW5nbGFzc2VzJTIwc3RhbmRpbmclMjBpbiUyMGZyb250JTIwb2YlMjB0aGUlMjBlaWZmZWwlMjB0b3dlciUyMGhvbGRpbmclMjBhJTIwc2lnbiUyMHRoYXQlMjBzYXlzJTIwJTIydmVyeSUyMGRlZXAlMjBsZWFybmluZyUyMiclMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMikuaW1hZ2VzJTBBJTBBJTIzJTIwc2F2ZSUyMGludGVybWVkaWF0ZSUyMGltYWdlJTBBcGlsX2ltYWdlJTIwJTNEJTIwcHRfdG9fcGlsKGltYWdlKSUwQXBpbF9pbWFnZSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSS5wbmclMjIpJTBBJTBBc3VwZXJfcmVzXzFfcGlwZSUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span>pipe = IFPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type=<span class="hljs-string">"pt"</span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Aa(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZzdGFibGUtZGlmZnVzaW9uJTJGbWFpbiUyRmFzc2V0cyUyRnN0YWJsZS1zYW1wbGVzJTJGaW1nMmltZyUyRnNrZXRjaC1tb3VudGFpbnMtaW5wdXQuanBnJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwSW1hZ2Uub3BlbihCeXRlc0lPKHJlc3BvbnNlLmNvbnRlbnQpKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMG9yaWdpbmFsX2ltYWdlLnJlc2l6ZSgoNzY4JTJDJTIwNTEyKSklMEElMEFwaXBlJTIwJTNEJTIwSUZJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwZmFudGFzeSUyMGxhbmRzY2FwZSUyMGluJTIwc3R5bGUlMjBtaW5lY3JhZnQlMjIlMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUwQSUyMyUyMHNhdmUlMjBpbnRlcm1lZGlhdGUlMjBpbWFnZSUwQXBpbF9pbWFnZSUyMCUzRCUyMHB0X3RvX3BpbChpbWFnZSklMEFwaWxfaW1hZ2UlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQXN1cGVyX3Jlc18xX3BpcGUlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXN1cGVyX3Jlc18xX3BpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFpbWFnZSUyMCUzRCUyMHN1cGVyX3Jlc18xX3BpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>pipe = IFImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, | |
| <span class="hljs-meta">... </span> variant=<span class="hljs-string">"fp16"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A fantasy landscape in style minecraft"</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span> output_type=<span class="hljs-string">"pt"</span>, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, | |
| <span class="hljs-meta">... </span> text_encoder=<span class="hljs-literal">None</span>, | |
| <span class="hljs-meta">... </span> variant=<span class="hljs-string">"fp16"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> original_image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Ka(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZzdGFibGUtZGlmZnVzaW9uJTJGbWFpbiUyRmFzc2V0cyUyRnN0YWJsZS1zYW1wbGVzJTJGaW1nMmltZyUyRnNrZXRjaC1tb3VudGFpbnMtaW5wdXQuanBnJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwSW1hZ2Uub3BlbihCeXRlc0lPKHJlc3BvbnNlLmNvbnRlbnQpKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMG9yaWdpbmFsX2ltYWdlLnJlc2l6ZSgoNzY4JTJDJTIwNTEyKSklMEElMEFwaXBlJTIwJTNEJTIwSUZJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwZmFudGFzeSUyMGxhbmRzY2FwZSUyMGluJTIwc3R5bGUlMjBtaW5lY3JhZnQlMjIlMEFwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUwQSUyMyUyMHNhdmUlMjBpbnRlcm1lZGlhdGUlMjBpbWFnZSUwQXBpbF9pbWFnZSUyMCUzRCUyMHB0X3RvX3BpbChpbWFnZSklMEFwaWxfaW1hZ2UlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQXN1cGVyX3Jlc18xX3BpcGUlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKSUwQXN1cGVyX3Jlc18xX3BpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFpbWFnZSUyMCUzRCUyMHN1cGVyX3Jlc18xX3BpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>pipe = IFImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, | |
| <span class="hljs-meta">... </span> variant=<span class="hljs-string">"fp16"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A fantasy landscape in style minecraft"</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span> output_type=<span class="hljs-string">"pt"</span>, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, | |
| <span class="hljs-meta">... </span> text_encoder=<span class="hljs-literal">None</span>, | |
| <span class="hljs-meta">... </span> variant=<span class="hljs-string">"fp16"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> original_image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function Oa(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmRpZmZ1c2VycyUyRmRvY3MtaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZpZiUyRnBlcnNvbi5wbmclMjIlMEFyZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBb3JpZ2luYWxfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UlMEElMEF1cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZkaWZmdXNlcnMlMkZkb2NzLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGaWYlMkZnbGFzc2VzX21hc2sucG5nJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpJTBBbWFza19pbWFnZSUyMCUzRCUyMG1hc2tfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJibHVlJTIwc3VuZ2xhc3NlcyUyMiUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29kZV9wcm9tcHQocHJvbXB0KSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBpbWFnZSUzRG9yaWdpbmFsX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzdXBlcl9yZXNfMV9waXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>original_image = original_image | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>mask_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)) | |
| <span class="hljs-meta">>>> </span>mask_image = mask_image | |
| <span class="hljs-meta">>>> </span>pipe = IFInpaintingPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"blue sunglasses"</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> image=original_image, | |
| <span class="hljs-meta">... </span> mask_image=mask_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span> output_type=<span class="hljs-string">"pt"</span>, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> mask_image=mask_image, | |
| <span class="hljs-meta">... </span> original_image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function el(j){let f,w="Examples:",b,_,M;return _=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQWltcG9ydCUyMHJlcXVlc3RzJTBBZnJvbSUyMGlvJTIwaW1wb3J0JTIwQnl0ZXNJTyUwQSUwQXVybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmRpZmZ1c2VycyUyRmRvY3MtaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZpZiUyRnBlcnNvbi5wbmclMjIlMEFyZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBb3JpZ2luYWxfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UlMEElMEF1cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZkaWZmdXNlcnMlMkZkb2NzLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGaWYlMkZnbGFzc2VzX21hc2sucG5nJTIyJTBBcmVzcG9uc2UlMjAlM0QlMjByZXF1ZXN0cy5nZXQodXJsKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKEJ5dGVzSU8ocmVzcG9uc2UuY29udGVudCkpJTBBbWFza19pbWFnZSUyMCUzRCUyMG1hc2tfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJibHVlJTIwc3VuZ2xhc3NlcyUyMiUwQSUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29kZV9wcm9tcHQocHJvbXB0KSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBpbWFnZSUzRG9yaWdpbmFsX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMEElMjMlMjBzYXZlJTIwaW50ZXJtZWRpYXRlJTIwaW1hZ2UlMEFwaWxfaW1hZ2UlMjAlM0QlMjBwdF90b19waWwoaW1hZ2UpJTBBcGlsX2ltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEFzdXBlcl9yZXNfMV9waXBlJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdXBlcl9yZXNfMV9waXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBaW1hZ2UlMjAlM0QlMjBzdXBlcl9yZXNfMV9waXBlKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEEpLmltYWdlcyUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>original_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>original_image = original_image | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>response = requests.get(url) | |
| <span class="hljs-meta">>>> </span>mask_image = Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)) | |
| <span class="hljs-meta">>>> </span>mask_image = mask_image | |
| <span class="hljs-meta">>>> </span>pipe = IFInpaintingPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"blue sunglasses"</span> | |
| <span class="hljs-meta">>>> </span>prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> image=original_image, | |
| <span class="hljs-meta">... </span> mask_image=mask_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span> output_type=<span class="hljs-string">"pt"</span>, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># save intermediate image</span> | |
| <span class="hljs-meta">>>> </span>pil_image = pt_to_pil(image) | |
| <span class="hljs-meta">>>> </span>pil_image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_I.png"</span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>super_res_1_pipe.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>image = super_res_1_pipe( | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> mask_image=mask_image, | |
| <span class="hljs-meta">... </span> original_image=original_image, | |
| <span class="hljs-meta">... </span> prompt_embeds=prompt_embeds, | |
| <span class="hljs-meta">... </span> negative_prompt_embeds=negative_embeds, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"./if_stage_II.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=w,b=a(),r(_.$$.fragment)},l(o){f=p(o,"P",{"data-svelte-h":!0}),y(f)!=="svelte-kvfsh7"&&(f.textContent=w),b=l(o),c(_.$$.fragment,o)},m(o,I){s(o,f,I),s(o,b,I),d(_,o,I),M=!0},p:Ot,i(o){M||(m(_.$$.fragment,o),M=!0)},o(o){g(_.$$.fragment,o),M=!1},d(o){o&&(n(f),n(b)),u(_,o)}}}function tl(j){let f,w,b,_,M,o,I,tn,P,Os='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white%22"/>',nn,ae,sn,le,ea=`DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding. | |
| The model is a modular composed of a frozen text encoder and three cascaded pixel diffusion modules:`,an,oe,ta=`<li>Stage 1: a base model that generates 64x64 px image based on text prompt,</li> <li>Stage 2: a 64x64 px => 256x256 px super-resolution model, and</li> <li>Stage 3: a 256x256 px => 1024x1024 px super-resolution model | |
| Stage 1 and Stage 2 utilize a frozen text encoder based on the T5 transformer to extract text embeddings, which are then fed into a UNet architecture enhanced with cross-attention and attention pooling. | |
| Stage 3 is <a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler" rel="nofollow">Stability AI’s x4 Upscaling model</a>. | |
| The result is a highly efficient model that outperforms current state-of-the-art models, achieving a zero-shot FID score of 6.66 on the COCO dataset. | |
| Our work underscores the potential of larger UNet architectures in the first stage of cascaded diffusion models and depicts a promising future for text-to-image synthesis.</li>`,ln,ie,on,pe,na="Before you can use IF, you need to accept its usage conditions. To do so:",pn,re,sa='<li>Make sure to have a <a href="https://huggingface.co/join" rel="nofollow">Hugging Face account</a> and be logged in.</li> <li>Accept the license on the model card of <a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0" rel="nofollow">DeepFloyd/IF-I-XL-v1.0</a>. Accepting the license on the stage I model card will auto accept for the other IF models.</li> <li>Make sure to login locally. Install <code>huggingface_hub</code>:</li>',rn,ce,cn,de,aa="run the login function in a Python shell:",dn,me,mn,ge,la='and enter your <a href="https://huggingface.co/docs/hub/security-tokens#what-are-user-access-tokens" rel="nofollow">Hugging Face Hub access token</a>.',gn,ue,oa="Next we install <code>diffusers</code> and dependencies:",un,fe,fn,he,ia="The following sections give more in-detail examples of how to use IF. Specifically:",hn,_e,pa='<li><a href="#text-to-image-generation">Text-to-Image Generation</a></li> <li><a href="#text-guided-image-to-image-generation">Image-to-Image Generation</a></li> <li><a href="#text-guided-inpainting-generation">Inpainting</a></li> <li><a href="#converting-between-different-pipelines">Reusing model weights</a></li> <li><a href="#optimizing-for-speed">Speed optimization</a></li> <li><a href="#optimizing-for-memory">Memory optimization</a></li>',_n,ye,ra="<strong>Available checkpoints</strong>",yn,Me,ca='<li><p><em>Stage-1</em></p> <ul><li><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0" rel="nofollow">DeepFloyd/IF-I-XL-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-I-L-v1.0" rel="nofollow">DeepFloyd/IF-I-L-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-I-M-v1.0" rel="nofollow">DeepFloyd/IF-I-M-v1.0</a></li></ul></li> <li><p><em>Stage-2</em></p> <ul><li><a href="https://huggingface.co/DeepFloyd/IF-II-L-v1.0" rel="nofollow">DeepFloyd/IF-II-L-v1.0</a></li> <li><a href="https://huggingface.co/DeepFloyd/IF-II-M-v1.0" rel="nofollow">DeepFloyd/IF-II-M-v1.0</a></li></ul></li> <li><p><em>Stage-3</em></p> <ul><li><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler" rel="nofollow">stabilityai/stable-diffusion-x4-upscaler</a></li></ul></li>',Mn,be,da='<strong>Google Colab</strong> <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/deepfloyd_if_free_tier_google_colab.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>',bn,Ie,In,we,ma='By default diffusers makes use of <a href="../../optimization/memory#model-offloading">model cpu offloading</a> to run the whole IF pipeline with as little as 14 GB of VRAM.',wn,Je,Jn,Ue,Un,Te,ga=`The same IF model weights can be used for text-guided image-to-image translation or image variation. | |
| In this case just make sure to load the weights using the <a href="/docs/diffusers/pr_12762/en/api/pipelines/deepfloyd_if#diffusers.IFImg2ImgPipeline">IFImg2ImgPipeline</a> and <a href="/docs/diffusers/pr_12762/en/api/pipelines/deepfloyd_if#diffusers.IFImg2ImgSuperResolutionPipeline">IFImg2ImgSuperResolutionPipeline</a> pipelines.`,Tn,Ze,ua=`<strong>Note</strong>: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines | |
| without loading them twice by making use of the <a href="/docs/diffusers/pr_12762/en/api/pipelines/overview#diffusers.DiffusionPipeline.components">components</a> argument as explained <a href="#converting-between-different-pipelines">here</a>.`,Zn,ve,vn,je,jn,We,fa=`The same IF model weights can be used for text-guided image-to-image translation or image variation. | |
| In this case just make sure to load the weights using the <a href="/docs/diffusers/pr_12762/en/api/pipelines/deepfloyd_if#diffusers.IFInpaintingPipeline">IFInpaintingPipeline</a> and <a href="/docs/diffusers/pr_12762/en/api/pipelines/deepfloyd_if#diffusers.IFInpaintingSuperResolutionPipeline">IFInpaintingSuperResolutionPipeline</a> pipelines.`,Wn,Be,ha=`<strong>Note</strong>: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines | |
| without loading them twice by making use of the <code>~DiffusionPipeline.components()</code> function as explained <a href="#converting-between-different-pipelines">here</a>.`,Bn,Fe,Fn,Xe,Xn,ke,_a="In addition to being loaded with <code>from_pretrained</code>, Pipelines can also be loaded directly from each other.",kn,Ge,Gn,Ce,Cn,Re,ya="The simplest optimization to run IF faster is to move all model components to the GPU.",Rn,Ve,Vn,xe,Ma="You can also run the diffusion process for a shorter number of timesteps.",xn,Se,ba="This can either be done with the <code>num_inference_steps</code> argument:",Sn,Ee,En,Ne,Ia="Or with the <code>timesteps</code> argument:",Nn,$e,$n,Qe,wa=`When doing image variation or inpainting, you can also decrease the number of timesteps | |
| with the strength argument. The strength argument is the amount of noise to add to the input image which also determines how many steps to run in the denoising process. | |
| A smaller number will vary the image less but run faster.`,Qn,Ye,Yn,Pe,Ja=`You can also use <a href="../../optimization/fp16#torchcompile"><code>torch.compile</code></a>. Note that we have not exhaustively tested <code>torch.compile</code> | |
| with IF and it might not give expected results.`,Pn,He,Hn,ze,zn,qe,Ua="When optimizing for GPU memory, we can use the standard diffusers CPU offloading APIs.",qn,Le,Ta="Either the model based CPU offloading,",Ln,De,Dn,Ae,Za="or the more aggressive layer based CPU offloading.",An,Ke,Kn,Oe,va="Additionally, T5 can be loaded in 8bit precision",On,et,es,tt,ja=`For CPU RAM constrained machines like Google Colab free tier where we can’t load all model components to the CPU at once, we can manually only load the pipeline with | |
| the text encoder or UNet when the respective model components are needed.`,ts,nt,ns,st,ss,at,Wa='<thead><tr><th>Pipeline</th> <th>Tasks</th> <th align="center">Colab</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py" rel="nofollow">pipeline_if.py</a></td> <td><em>Text-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py" rel="nofollow">pipeline_if_superresolution.py</a></td> <td><em>Text-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py" rel="nofollow">pipeline_if_img2img.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py" rel="nofollow">pipeline_if_img2img_superresolution.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py" rel="nofollow">pipeline_if_inpainting.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr> <tr><td><a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py" rel="nofollow">pipeline_if_inpainting_superresolution.py</a></td> <td><em>Image-to-Image Generation</em></td> <td align="center">-</td></tr></tbody>',as,lt,ls,W,ot,Js,C,it,Us,Xt,Ba="Function invoked when calling the pipeline for generation.",Ts,H,Zs,z,pt,vs,kt,Fa="Encodes the prompt into text encoder hidden states.",os,rt,is,B,ct,js,R,dt,Ws,Gt,Xa="Function invoked when calling the pipeline for generation.",Bs,q,Fs,L,mt,Xs,Ct,ka="Encodes the prompt into text encoder hidden states.",ps,gt,rs,F,ut,ks,V,ft,Gs,Rt,Ga="Function invoked when calling the pipeline for generation.",Cs,D,Rs,A,ht,Vs,Vt,Ca="Encodes the prompt into text encoder hidden states.",cs,_t,ds,X,yt,xs,x,Mt,Ss,xt,Ra="Function invoked when calling the pipeline for generation.",Es,K,Ns,O,bt,$s,St,Va="Encodes the prompt into text encoder hidden states.",ms,It,gs,k,wt,Qs,S,Jt,Ys,Et,xa="Function invoked when calling the pipeline for generation.",Ps,ee,Hs,te,Ut,zs,Nt,Sa="Encodes the prompt into text encoder hidden states.",us,Tt,fs,G,Zt,qs,E,vt,Ls,$t,Ea="Function invoked when calling the pipeline for generation.",Ds,ne,As,se,jt,Ks,Qt,Na="Encodes the prompt into text encoder hidden states.",hs,Wt,_s,en,ys;return M=new za({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),I=new v({props:{title:"DeepFloyd IF",local:"deepfloyd-if",headingTag:"h1"}}),ae=new v({props:{title:"Overview",local:"overview",headingTag:"h2"}}),ie=new v({props:{title:"Usage",local:"usage",headingTag:"h2"}}),ce=new J({props:{code:"cGlwJTIwaW5zdGFsbCUyMGh1Z2dpbmdmYWNlX2h1YiUyMC0tdXBncmFkZQ==",highlighted:"pip install huggingface_hub --upgrade",wrap:!1}}),me=new J({props:{code:"ZnJvbSUyMGh1Z2dpbmdmYWNlX2h1YiUyMGltcG9ydCUyMGxvZ2luJTBBJTBBbG9naW4oKQ==",highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> login | |
| login()`,wrap:!1}}),fe=new J({props:{code:"cGlwJTIwaW5zdGFsbCUyMC1xJTIwZGlmZnVzZXJzJTIwYWNjZWxlcmF0ZSUyMHRyYW5zZm9ybWVycw==",highlighted:"pip install -q diffusers accelerate transformers",wrap:!1}}),Ie=new v({props:{title:"Text-to-Image Generation",local:"text-to-image-generation",headingTag:"h3"}}),Je=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMHB0X3RvX3BpbCUyQyUyMG1ha2VfaW1hZ2VfZ3JpZCUwQWltcG9ydCUyMHRvcmNoJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFzdGFnZV8xLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBJTIzJTIwc3RhZ2UlMjAyJTBBc3RhZ2VfMiUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJEZWVwRmxveWQlMkZJRi1JSS1MLXYxLjAlMjIlMkMlMjB0ZXh0X2VuY29kZXIlM0ROb25lJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBc3RhZ2VfMi5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXNhZmV0eV9tb2R1bGVzJTIwJTNEJTIwJTdCJTBBJTIwJTIwJTIwJTIwJTIyZmVhdHVyZV9leHRyYWN0b3IlMjIlM0ElMjBzdGFnZV8xLmZlYXR1cmVfZXh0cmFjdG9yJTJDJTBBJTIwJTIwJTIwJTIwJTIyc2FmZXR5X2NoZWNrZXIlMjIlM0ElMjBzdGFnZV8xLnNhZmV0eV9jaGVja2VyJTJDJTBBJTIwJTIwJTIwJTIwJTIyd2F0ZXJtYXJrZXIlMjIlM0ElMjBzdGFnZV8xLndhdGVybWFya2VyJTJDJTBBJTdEJTBBc3RhZ2VfMyUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teDQtdXBzY2FsZXIlMjIlMkMlMjAqKnNhZmV0eV9tb2R1bGVzJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXN0YWdlXzMuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAnYSUyMHBob3RvJTIwb2YlMjBhJTIwa2FuZ2Fyb28lMjB3ZWFyaW5nJTIwYW4lMjBvcmFuZ2UlMjBob29kaWUlMjBhbmQlMjBibHVlJTIwc3VuZ2xhc3NlcyUyMHN0YW5kaW5nJTIwaW4lMjBmcm9udCUyMG9mJTIwdGhlJTIwZWlmZmVsJTIwdG93ZXIlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCUyMnZlcnklMjBkZWVwJTIwbGVhcm5pbmclMjInJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUyMGdlbmVyYXRvciUzRGdlbmVyYXRvciUyQyUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMEEpLmltYWdlcyUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzJfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMiglMEElMjAlMjAlMjAlMjBpbWFnZSUzRHN0YWdlXzFfb3V0cHV0JTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMiUyQyUwQSkuaW1hZ2VzJTBBJTIzcHRfdG9fcGlsKHN0YWdlXzJfb3V0cHV0KSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSUkucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXN0YWdlXzNfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMyhwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRHN0YWdlXzJfb3V0cHV0JTJDJTIwbm9pc2VfbGV2ZWwlM0QxMDAlMkMlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IpLmltYWdlcyUwQSUyM3N0YWdlXzNfb3V0cHV0JTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSUkucG5nJTIyKSUwQW1ha2VfaW1hZ2VfZ3JpZCglNUJwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RCUyQyUyMHB0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVEJTJDJTIwc3RhZ2VfM19vdXRwdXQlNUIwJTVEJTVEJTJDJTIwcm93cyUzRDElMkMlMjByb3dzJTNEMyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, make_image_grid | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1 = DiffusionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| stage_1.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2 = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| ) | |
| stage_2.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 3</span> | |
| safety_modules = { | |
| <span class="hljs-string">"feature_extractor"</span>: stage_1.feature_extractor, | |
| <span class="hljs-string">"safety_checker"</span>: stage_1.safety_checker, | |
| <span class="hljs-string">"watermarker"</span>: stage_1.watermarker, | |
| } | |
| stage_3 = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"stabilityai/stable-diffusion-x4-upscaler"</span>, **safety_modules, torch_dtype=torch.float16 | |
| ) | |
| stage_3.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'</span> | |
| generator = torch.manual_seed(<span class="hljs-number">1</span>) | |
| <span class="hljs-comment"># text embeds</span> | |
| prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt) | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1_output = stage_1( | |
| prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type=<span class="hljs-string">"pt"</span> | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")</span> | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2_output = stage_2( | |
| image=stage_1_output, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| generator=generator, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")</span> | |
| <span class="hljs-comment"># stage 3</span> | |
| stage_3_output = stage_3(prompt=prompt, image=stage_2_output, noise_level=<span class="hljs-number">100</span>, generator=generator).images | |
| <span class="hljs-comment">#stage_3_output[0].save("./if_stage_III.png")</span> | |
| make_image_grid([pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">3</span>)`,wrap:!1}}),Ue=new v({props:{title:"Text Guided Image-to-Image Generation",local:"text-guided-image-to-image-generation",headingTag:"h3"}}),ve=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW1nMkltZ1BpcGVsaW5lJTJDJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBsb2FkX2ltYWdlJTJDJTIwbWFrZV9pbWFnZV9ncmlkJTBBaW1wb3J0JTIwdG9yY2glMEElMEElMjMlMjBkb3dubG9hZCUyMGltYWdlJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRnN0YWJsZS1kaWZmdXNpb24lMkZtYWluJTJGYXNzZXRzJTJGc3RhYmxlLXNhbXBsZXMlMkZpbWcyaW1nJTJGc2tldGNoLW1vdW50YWlucy1pbnB1dC5qcGclMjIlMEFvcmlnaW5hbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UodXJsKSUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwb3JpZ2luYWxfaW1hZ2UucmVzaXplKCg3NjglMkMlMjA1MTIpKSUwQSUwQSUyMyUyMHN0YWdlJTIwMSUwQXN0YWdlXzElMjAlM0QlMjBJRkltZzJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpJTBBc3RhZ2VfMS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzIlMjAlM0QlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXN0YWdlXzIuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEElMjMlMjBzdGFnZSUyMDMlMEFzYWZldHlfbW9kdWxlcyUyMCUzRCUyMCU3QiUwQSUyMCUyMCUyMCUyMCUyMmZlYXR1cmVfZXh0cmFjdG9yJTIyJTNBJTIwc3RhZ2VfMS5mZWF0dXJlX2V4dHJhY3RvciUyQyUwQSUyMCUyMCUyMCUyMCUyMnNhZmV0eV9jaGVja2VyJTIyJTNBJTIwc3RhZ2VfMS5zYWZldHlfY2hlY2tlciUyQyUwQSUyMCUyMCUyMCUyMCUyMndhdGVybWFya2VyJTIyJTNBJTIwc3RhZ2VfMS53YXRlcm1hcmtlciUyQyUwQSU3RCUwQXN0YWdlXzMlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXg0LXVwc2NhbGVyJTIyJTJDJTIwKipzYWZldHlfbW9kdWxlcyUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdGFnZV8zLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMGZhbnRhc3klMjBsYW5kc2NhcGUlMjBpbiUyMHN0eWxlJTIwbWluZWNyYWZ0JTIyJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMjNwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JLnBuZyUyMiklMEElMEElMjMlMjBzdGFnZSUyMDIlMEFzdGFnZV8yX291dHB1dCUyMCUzRCUyMHN0YWdlXzIoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RzdGFnZV8xX291dHB1dCUyQyUwQSUyMCUyMCUyMCUyMG9yaWdpbmFsX2ltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMnB0JTIyJTJDJTBBKS5pbWFnZXMlMEElMjNwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSS5wbmclMjIpJTBBJTBBJTIzJTIwc3RhZ2UlMjAzJTBBc3RhZ2VfM19vdXRwdXQlMjAlM0QlMjBzdGFnZV8zKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEc3RhZ2VfMl9vdXRwdXQlMkMlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMjBub2lzZV9sZXZlbCUzRDEwMCkuaW1hZ2VzJTBBJTIzc3RhZ2VfM19vdXRwdXQlNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0lJSS5wbmclMjIpJTBBbWFrZV9pbWFnZV9ncmlkKCU1Qm9yaWdpbmFsX2ltYWdlJTJDJTIwcHRfdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQlMkMlMjBwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RCUyQyUyMHN0YWdlXzNfb3V0cHV0JTVCMCU1RCU1RCUyQyUyMHJvd3MlM0QxJTJDJTIwcm93cyUzRDQp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, load_image, make_image_grid | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-comment"># download image</span> | |
| url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| original_image = load_image(url) | |
| original_image = original_image.resize((<span class="hljs-number">768</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1 = IFImg2ImgPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| stage_1.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| ) | |
| stage_2.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 3</span> | |
| safety_modules = { | |
| <span class="hljs-string">"feature_extractor"</span>: stage_1.feature_extractor, | |
| <span class="hljs-string">"safety_checker"</span>: stage_1.safety_checker, | |
| <span class="hljs-string">"watermarker"</span>: stage_1.watermarker, | |
| } | |
| stage_3 = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"stabilityai/stable-diffusion-x4-upscaler"</span>, **safety_modules, torch_dtype=torch.float16 | |
| ) | |
| stage_3.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"A fantasy landscape in style minecraft"</span> | |
| generator = torch.manual_seed(<span class="hljs-number">1</span>) | |
| <span class="hljs-comment"># text embeds</span> | |
| prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt) | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1_output = stage_1( | |
| image=original_image, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| generator=generator, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")</span> | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2_output = stage_2( | |
| image=stage_1_output, | |
| original_image=original_image, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| generator=generator, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")</span> | |
| <span class="hljs-comment"># stage 3</span> | |
| stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=<span class="hljs-number">100</span>).images | |
| <span class="hljs-comment">#stage_3_output[0].save("./if_stage_III.png")</span> | |
| make_image_grid([original_image, pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">4</span>)`,wrap:!1}}),je=new v({props:{title:"Text Guided Inpainting Generation",local:"text-guided-inpainting-generation",headingTag:"h3"}}),Fe=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGSW5wYWludGluZ1BpcGVsaW5lJTJDJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUlMkMlMjBEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBsb2FkX2ltYWdlJTJDJTIwbWFrZV9pbWFnZV9ncmlkJTBBaW1wb3J0JTIwdG9yY2glMEElMEElMjMlMjBkb3dubG9hZCUyMGltYWdlJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGZGlmZnVzZXJzJTJGZG9jcy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmlmJTJGcGVyc29uLnBuZyUyMiUwQW9yaWdpbmFsX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSh1cmwpJTBBJTBBJTIzJTIwZG93bmxvYWQlMjBtYXNrJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGZGlmZnVzZXJzJTJGZG9jcy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmlmJTJGZ2xhc3Nlc19tYXNrLnBuZyUyMiUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKHVybCklMEElMEElMjMlMjBzdGFnZSUyMDElMEFzdGFnZV8xJTIwJTNEJTIwSUZJbnBhaW50aW5nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXN0YWdlXzEuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEElMjMlMjBzdGFnZSUyMDIlMEFzdGFnZV8yJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFzdGFnZV8yLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBJTIzJTIwc3RhZ2UlMjAzJTBBc2FmZXR5X21vZHVsZXMlMjAlM0QlMjAlN0IlMEElMjAlMjAlMjAlMjAlMjJmZWF0dXJlX2V4dHJhY3RvciUyMiUzQSUyMHN0YWdlXzEuZmVhdHVyZV9leHRyYWN0b3IlMkMlMEElMjAlMjAlMjAlMjAlMjJzYWZldHlfY2hlY2tlciUyMiUzQSUyMHN0YWdlXzEuc2FmZXR5X2NoZWNrZXIlMkMlMEElMjAlMjAlMjAlMjAlMjJ3YXRlcm1hcmtlciUyMiUzQSUyMHN0YWdlXzEud2F0ZXJtYXJrZXIlMkMlMEElN0QlMEFzdGFnZV8zJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi14NC11cHNjYWxlciUyMiUyQyUyMCoqc2FmZXR5X21vZHVsZXMlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBc3RhZ2VfMy5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmJsdWUlMjBzdW5nbGFzc2VzJTIyJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMSklMEElMEElMjMlMjB0ZXh0JTIwZW1iZWRzJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHN0YWdlXzEuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwc3RhZ2UlMjAxJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBzdGFnZV8xKCUwQSUyMCUyMCUyMCUyMGltYWdlJTNEb3JpZ2luYWxfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRGdlbmVyYXRvciUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEEpLmltYWdlcyUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMiUwQXN0YWdlXzJfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMiglMEElMjAlMjAlMjAlMjBpbWFnZSUzRHN0YWdlXzFfb3V0cHV0JTJDJTBBJTIwJTIwJTIwJTIwb3JpZ2luYWxfaW1hZ2UlM0RvcmlnaW5hbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0X2VtYmVkcyUzRHByb21wdF9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHRfZW1iZWRzJTNEbmVnYXRpdmVfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwdCUyMiUyQyUwQSkuaW1hZ2VzJTBBJTIzcHRfdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQuc2F2ZSglMjIuJTJGaWZfc3RhZ2VfSUkucG5nJTIyKSUwQSUwQSUyMyUyMHN0YWdlJTIwMyUwQXN0YWdlXzNfb3V0cHV0JTIwJTNEJTIwc3RhZ2VfMyhwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRHN0YWdlXzJfb3V0cHV0JTJDJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTIwbm9pc2VfbGV2ZWwlM0QxMDApLmltYWdlcyUwQSUyM3N0YWdlXzNfb3V0cHV0JTVCMCU1RC5zYXZlKCUyMi4lMkZpZl9zdGFnZV9JSUkucG5nJTIyKSUwQW1ha2VfaW1hZ2VfZ3JpZCglNUJvcmlnaW5hbF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlMkMlMjBwdF90b19waWwoc3RhZ2VfMV9vdXRwdXQpJTVCMCU1RCUyQyUyMHB0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVEJTJDJTIwc3RhZ2VfM19vdXRwdXQlNUIwJTVEJTVEJTJDJTIwcm93cyUzRDElMkMlMjByb3dzJTNENSk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, load_image, make_image_grid | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-comment"># download image</span> | |
| url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"</span> | |
| original_image = load_image(url) | |
| <span class="hljs-comment"># download mask</span> | |
| url = <span class="hljs-string">"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"</span> | |
| mask_image = load_image(url) | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1 = IFInpaintingPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| stage_1.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2 = IFInpaintingSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| ) | |
| stage_2.enable_model_cpu_offload() | |
| <span class="hljs-comment"># stage 3</span> | |
| safety_modules = { | |
| <span class="hljs-string">"feature_extractor"</span>: stage_1.feature_extractor, | |
| <span class="hljs-string">"safety_checker"</span>: stage_1.safety_checker, | |
| <span class="hljs-string">"watermarker"</span>: stage_1.watermarker, | |
| } | |
| stage_3 = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"stabilityai/stable-diffusion-x4-upscaler"</span>, **safety_modules, torch_dtype=torch.float16 | |
| ) | |
| stage_3.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"blue sunglasses"</span> | |
| generator = torch.manual_seed(<span class="hljs-number">1</span>) | |
| <span class="hljs-comment"># text embeds</span> | |
| prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt) | |
| <span class="hljs-comment"># stage 1</span> | |
| stage_1_output = stage_1( | |
| image=original_image, | |
| mask_image=mask_image, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| generator=generator, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")</span> | |
| <span class="hljs-comment"># stage 2</span> | |
| stage_2_output = stage_2( | |
| image=stage_1_output, | |
| original_image=original_image, | |
| mask_image=mask_image, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| generator=generator, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save("./if_stage_II.png")</span> | |
| <span class="hljs-comment"># stage 3</span> | |
| stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=<span class="hljs-number">100</span>).images | |
| <span class="hljs-comment">#stage_3_output[0].save("./if_stage_III.png")</span> | |
| make_image_grid([original_image, mask_image, pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>], stage_3_output[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">5</span>)`,wrap:!1}}),Xe=new v({props:{title:"Converting between different pipelines",local:"converting-between-different-pipelines",headingTag:"h3"}}),Ge=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTBBJTBBcGlwZV8xJTIwJTNEJTIwSUZQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyKSUwQXBpcGVfMiUyMCUzRCUyMElGU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUlJLUwtdjEuMCUyMiklMEElMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwSUZJbWcySW1nUGlwZWxpbmUlMkMlMjBJRkltZzJJbWdTdXBlclJlc29sdXRpb25QaXBlbGluZSUwQSUwQXBpcGVfMSUyMCUzRCUyMElGSW1nMkltZ1BpcGVsaW5lKCoqcGlwZV8xLmNvbXBvbmVudHMpJTBBcGlwZV8yJTIwJTNEJTIwSUZJbWcySW1nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUoKipwaXBlXzIuY29tcG9uZW50cyklMEElMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwSUZJbnBhaW50aW5nUGlwZWxpbmUlMkMlMjBJRklucGFpbnRpbmdTdXBlclJlc29sdXRpb25QaXBlbGluZSUwQSUwQXBpcGVfMSUyMCUzRCUyMElGSW5wYWludGluZ1BpcGVsaW5lKCoqcGlwZV8xLmNvbXBvbmVudHMpJTBBcGlwZV8yJTIwJTNEJTIwSUZJbnBhaW50aW5nU3VwZXJSZXNvbHV0aW9uUGlwZWxpbmUoKipwaXBlXzIuY29tcG9uZW50cyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline | |
| pipe_1 = IFPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>) | |
| pipe_2 = IFSuperResolutionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>) | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline | |
| pipe_1 = IFImg2ImgPipeline(**pipe_1.components) | |
| pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components) | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline | |
| pipe_1 = IFInpaintingPipeline(**pipe_1.components) | |
| pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)`,wrap:!1}}),Ce=new v({props:{title:"Optimizing for speed",local:"optimizing-for-speed",headingTag:"h3"}}),Ve=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIp",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| pipe.to(<span class="hljs-string">"cuda"</span>)`,wrap:!1}}),Ee=new J({props:{code:"cGlwZSglMjIlM0Nwcm9tcHQlM0UlMjIlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMzAp",highlighted:'pipe(<span class="hljs-string">"<prompt>"</span>, num_inference_steps=<span class="hljs-number">30</span>)',wrap:!1}}),$e=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2Vycy5waXBlbGluZXMuZGVlcGZsb3lkX2lmJTIwaW1wb3J0JTIwZmFzdDI3X3RpbWVzdGVwcyUwQSUwQXBpcGUoJTIyJTNDcHJvbXB0JTNFJTIyJTJDJTIwdGltZXN0ZXBzJTNEZmFzdDI3X3RpbWVzdGVwcyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers.pipelines.deepfloyd_if <span class="hljs-keyword">import</span> fast27_timesteps | |
| pipe(<span class="hljs-string">"<prompt>"</span>, timesteps=fast27_timesteps)`,wrap:!1}}),Ye=new J({props:{code:"cGlwZSUyMCUzRCUyMElGSW1nMkltZ1BpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKGltYWdlJTNEaW1hZ2UlMkMlMjBwcm9tcHQlM0QlMjIlM0Nwcm9tcHQlM0UlMjIlMkMlMjBzdHJlbmd0aCUzRDAuMykuaW1hZ2Vz",highlighted:`pipe = IFImg2ImgPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| image = pipe(image=image, prompt=<span class="hljs-string">"<prompt>"</span>, strength=<span class="hljs-number">0.3</span>).images`,wrap:!1}}),He=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFwaXBlLnRleHRfZW5jb2RlciUyMCUzRCUyMHRvcmNoLmNvbXBpbGUocGlwZS50ZXh0X2VuY29kZXIlMkMlMjBtb2RlJTNEJTIycmVkdWNlLW92ZXJoZWFkJTIyJTJDJTIwZnVsbGdyYXBoJTNEVHJ1ZSklMEFwaXBlLnVuZXQlMjAlM0QlMjB0b3JjaC5jb21waWxlKHBpcGUudW5ldCUyQyUyMG1vZGUlM0QlMjJyZWR1Y2Utb3ZlcmhlYWQlMjIlMkMlMjBmdWxsZ3JhcGglM0RUcnVlKQ==",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| pipe.text_encoder = torch.<span class="hljs-built_in">compile</span>(pipe.text_encoder, mode=<span class="hljs-string">"reduce-overhead"</span>, fullgraph=<span class="hljs-literal">True</span>) | |
| pipe.unet = torch.<span class="hljs-built_in">compile</span>(pipe.unet, mode=<span class="hljs-string">"reduce-overhead"</span>, fullgraph=<span class="hljs-literal">True</span>)`,wrap:!1}}),ze=new v({props:{title:"Optimizing for memory",local:"optimizing-for-memory",headingTag:"h3"}}),De=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgp",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| pipe.enable_model_cpu_offload()`,wrap:!1}}),Ke=new J({props:{code:"cGlwZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9zZXF1ZW50aWFsX2NwdV9vZmZsb2FkKCk=",highlighted:`pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16) | |
| pipe.enable_sequential_cpu_offload()`,wrap:!1}}),et=new J({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMFQ1RW5jb2Rlck1vZGVsJTBBJTBBdGV4dF9lbmNvZGVyJTIwJTNEJTIwVDVFbmNvZGVyTW9kZWwuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHN1YmZvbGRlciUzRCUyMnRleHRfZW5jb2RlciUyMiUyQyUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTJDJTIwbG9hZF9pbl84Yml0JTNEVHJ1ZSUyQyUyMHZhcmlhbnQlM0QlMjI4Yml0JTIyJTBBKSUwQSUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBEaWZmdXNpb25QaXBlbGluZSUwQSUwQXBpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNEdGV4dF9lbmNvZGVyJTJDJTIwJTIwJTIzJTIwcGFzcyUyMHRoZSUyMHByZXZpb3VzbHklMjBpbnN0YW50aWF0ZWQlMjA4Yml0JTIwdGV4dCUyMGVuY29kZXIlMEElMjAlMjAlMjAlMjB1bmV0JTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTJDJTBBKSUwQSUwQXByb21wdF9lbWJlZHMlMkMlMjBuZWdhdGl2ZV9lbWJlZHMlMjAlM0QlMjBwaXBlLmVuY29kZV9wcm9tcHQoJTIyJTNDcHJvbXB0JTNFJTIyKQ==",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel | |
| text_encoder = T5EncoderModel.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, subfolder=<span class="hljs-string">"text_encoder"</span>, device_map=<span class="hljs-string">"auto"</span>, load_in_8bit=<span class="hljs-literal">True</span>, variant=<span class="hljs-string">"8bit"</span> | |
| ) | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| pipe = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, | |
| text_encoder=text_encoder, <span class="hljs-comment"># pass the previously instantiated 8bit text encoder</span> | |
| unet=<span class="hljs-literal">None</span>, | |
| device_map=<span class="hljs-string">"auto"</span>, | |
| ) | |
| prompt_embeds, negative_embeds = pipe.encode_prompt(<span class="hljs-string">"<prompt>"</span>)`,wrap:!1}}),nt=new J({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMElGUGlwZWxpbmUlMkMlMjBJRlN1cGVyUmVzb2x1dGlvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEFpbXBvcnQlMjBnYyUwQWZyb20lMjB0cmFuc2Zvcm1lcnMlMjBpbXBvcnQlMjBUNUVuY29kZXJNb2RlbCUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBwdF90b19waWwlMkMlMjBtYWtlX2ltYWdlX2dyaWQlMEElMEF0ZXh0X2VuY29kZXIlMjAlM0QlMjBUNUVuY29kZXJNb2RlbC5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSS1YTC12MS4wJTIyJTJDJTIwc3ViZm9sZGVyJTNEJTIydGV4dF9lbmNvZGVyJTIyJTJDJTIwZGV2aWNlX21hcCUzRCUyMmF1dG8lMjIlMkMlMjBsb2FkX2luXzhiaXQlM0RUcnVlJTJDJTIwdmFyaWFudCUzRCUyMjhiaXQlMjIlMEEpJTBBJTBBJTIzJTIwdGV4dCUyMHRvJTIwaW1hZ2UlMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUwQSUyMCUyMCUyMCUyMHRleHRfZW5jb2RlciUzRHRleHRfZW5jb2RlciUyQyUyMCUyMCUyMyUyMHBhc3MlMjB0aGUlMjBwcmV2aW91c2x5JTIwaW5zdGFudGlhdGVkJTIwOGJpdCUyMHRleHQlMjBlbmNvZGVyJTBBJTIwJTIwJTIwJTIwdW5ldCUzRE5vbmUlMkMlMEElMjAlMjAlMjAlMjBkZXZpY2VfbWFwJTNEJTIyYXV0byUyMiUyQyUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAnYSUyMHBob3RvJTIwb2YlMjBhJTIwa2FuZ2Fyb28lMjB3ZWFyaW5nJTIwYW4lMjBvcmFuZ2UlMjBob29kaWUlMjBhbmQlMjBibHVlJTIwc3VuZ2xhc3NlcyUyMHN0YW5kaW5nJTIwaW4lMjBmcm9udCUyMG9mJTIwdGhlJTIwZWlmZmVsJTIwdG93ZXIlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCUyMnZlcnklMjBkZWVwJTIwbGVhcm5pbmclMjInJTBBcHJvbXB0X2VtYmVkcyUyQyUyMG5lZ2F0aXZlX2VtYmVkcyUyMCUzRCUyMHBpcGUuZW5jb2RlX3Byb21wdChwcm9tcHQpJTBBJTBBJTIzJTIwUmVtb3ZlJTIwdGhlJTIwcGlwZWxpbmUlMjBzbyUyMHdlJTIwY2FuJTIwcmUtbG9hZCUyMHRoZSUyMHBpcGVsaW5lJTIwd2l0aCUyMHRoZSUyMHVuZXQlMEFkZWwlMjB0ZXh0X2VuY29kZXIlMEFkZWwlMjBwaXBlJTBBZ2MuY29sbGVjdCgpJTBBdG9yY2guY3VkYS5lbXB0eV9jYWNoZSgpJTBBJTBBcGlwZSUyMCUzRCUyMElGUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkRlZXBGbG95ZCUyRklGLUktWEwtdjEuMCUyMiUyQyUyMHRleHRfZW5jb2RlciUzRE5vbmUlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTBBKSUwQSUwQWdlbmVyYXRvciUyMCUzRCUyMHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDApJTBBc3RhZ2VfMV9vdXRwdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEEpLmltYWdlcyUwQSUwQSUyM3B0X3RvX3BpbChzdGFnZV8xX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0kucG5nJTIyKSUwQSUwQSUyMyUyMFJlbW92ZSUyMHRoZSUyMHBpcGVsaW5lJTIwc28lMjB3ZSUyMGNhbiUyMGxvYWQlMjB0aGUlMjBzdXBlci1yZXNvbHV0aW9uJTIwcGlwZWxpbmUlMEFkZWwlMjBwaXBlJTBBZ2MuY29sbGVjdCgpJTBBdG9yY2guY3VkYS5lbXB0eV9jYWNoZSgpJTBBJTBBJTIzJTIwRmlyc3QlMjBzdXBlciUyMHJlc29sdXRpb24lMEElMEFwaXBlJTIwJTNEJTIwSUZTdXBlclJlc29sdXRpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRGVlcEZsb3lkJTJGSUYtSUktTC12MS4wJTIyJTJDJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTIwZGV2aWNlX21hcCUzRCUyMmF1dG8lMjIlMEEpJTBBJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoMCklMEFzdGFnZV8yX291dHB1dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RzdGFnZV8xX291dHB1dCUyQyUwQSUyMCUyMCUyMCUyMHByb21wdF9lbWJlZHMlM0Rwcm9tcHRfZW1iZWRzJTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0X2VtYmVkcyUzRG5lZ2F0aXZlX2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIycHQlMjIlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEEpLmltYWdlcyUwQSUwQSUyM3B0X3RvX3BpbChzdGFnZV8yX291dHB1dCklNUIwJTVELnNhdmUoJTIyLiUyRmlmX3N0YWdlX0lJLnBuZyUyMiklMEFtYWtlX2ltYWdlX2dyaWQoJTVCcHRfdG9fcGlsKHN0YWdlXzFfb3V0cHV0KSU1QjAlNUQlMkMlMjBwdF90b19waWwoc3RhZ2VfMl9vdXRwdXQpJTVCMCU1RCU1RCUyQyUyMHJvd3MlM0QxJTJDJTIwcm93cyUzRDIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> IFPipeline, IFSuperResolutionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">import</span> gc | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> pt_to_pil, make_image_grid | |
| text_encoder = T5EncoderModel.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, subfolder=<span class="hljs-string">"text_encoder"</span>, device_map=<span class="hljs-string">"auto"</span>, load_in_8bit=<span class="hljs-literal">True</span>, variant=<span class="hljs-string">"8bit"</span> | |
| ) | |
| <span class="hljs-comment"># text to image</span> | |
| pipe = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, | |
| text_encoder=text_encoder, <span class="hljs-comment"># pass the previously instantiated 8bit text encoder</span> | |
| unet=<span class="hljs-literal">None</span>, | |
| device_map=<span class="hljs-string">"auto"</span>, | |
| ) | |
| prompt = <span class="hljs-string">'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'</span> | |
| prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) | |
| <span class="hljs-comment"># Remove the pipeline so we can re-load the pipeline with the unet</span> | |
| <span class="hljs-keyword">del</span> text_encoder | |
| <span class="hljs-keyword">del</span> pipe | |
| gc.collect() | |
| torch.cuda.empty_cache() | |
| pipe = IFPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-I-XL-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16, device_map=<span class="hljs-string">"auto"</span> | |
| ) | |
| generator = torch.Generator().manual_seed(<span class="hljs-number">0</span>) | |
| stage_1_output = pipe( | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| generator=generator, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")</span> | |
| <span class="hljs-comment"># Remove the pipeline so we can load the super-resolution pipeline</span> | |
| <span class="hljs-keyword">del</span> pipe | |
| gc.collect() | |
| torch.cuda.empty_cache() | |
| <span class="hljs-comment"># First super resolution</span> | |
| pipe = IFSuperResolutionPipeline.from_pretrained( | |
| <span class="hljs-string">"DeepFloyd/IF-II-L-v1.0"</span>, text_encoder=<span class="hljs-literal">None</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16, device_map=<span class="hljs-string">"auto"</span> | |
| ) | |
| generator = torch.Generator().manual_seed(<span class="hljs-number">0</span>) | |
| stage_2_output = pipe( | |
| image=stage_1_output, | |
| prompt_embeds=prompt_embeds, | |
| negative_prompt_embeds=negative_embeds, | |
| output_type=<span class="hljs-string">"pt"</span>, | |
| generator=generator, | |
| ).images | |
| <span class="hljs-comment">#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")</span> | |
| make_image_grid([pt_to_pil(stage_1_output)[<span class="hljs-number">0</span>], pt_to_pil(stage_2_output)[<span class="hljs-number">0</span>]], rows=<span class="hljs-number">1</span>, rows=<span class="hljs-number">2</span>)`,wrap:!1}}),st=new v({props:{title:"Available Pipelines:",local:"available-pipelines",headingTag:"h2"}}),lt=new v({props:{title:"IFPipeline",local:"diffusers.IFPipeline",headingTag:"h2"}}),ot=new Z({props:{name:"class diffusers.IFPipeline",anchor:"diffusers.IFPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L96"}}),it=new Z({props:{name:"__call__",anchor:"diffusers.IFPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 100"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 100) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.IFPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.IFPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.IFPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L547",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),H=new Kt({props:{anchor:"diffusers.IFPipeline.__call__.example",$$slots:{default:[La]},$$scope:{ctx:j}}}),pt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#L168"}}),rt=new v({props:{title:"IFSuperResolutionPipeline",local:"diffusers.IFSuperResolutionPipeline",headingTag:"h2"}}),ct=new Z({props:{name:"class diffusers.IFSuperResolutionPipeline",anchor:"diffusers.IFSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L82"}}),dt=new Z({props:{name:"__call__",anchor:"diffusers.IFSuperResolutionPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"height",val:": int = None"},{name:"width",val:": int = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 250"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to None) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to None) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>torch.Tensor</code>) — | |
| The image to be upscaled.`,name:"image"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>, defaults to None) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 250) — | |
| The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L614",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),q=new Kt({props:{anchor:"diffusers.IFSuperResolutionPipeline.__call__.example",$$slots:{default:[Da]},$$scope:{ctx:j}}}),mt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py#L302"}}),gt=new v({props:{title:"IFImg2ImgPipeline",local:"diffusers.IFImg2ImgPipeline",headingTag:"h2"}}),ut=new Z({props:{name:"class diffusers.IFImg2ImgPipeline",anchor:"diffusers.IFImg2ImgPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L120"}}),ft=new Z({props:{name:"__call__",anchor:"diffusers.IFImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.7"},{name:"num_inference_steps",val:": int = 80"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 10.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the | |
| process.`,name:"image"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.7) — | |
| Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> | |
| will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of | |
| denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will | |
| be maximum and the denoising process will run for the full number of iterations specified in | |
| <code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 80) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 10.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.IFImg2ImgPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L661",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),D=new Kt({props:{anchor:"diffusers.IFImg2ImgPipeline.__call__.example",$$slots:{default:[Aa]},$$scope:{ctx:j}}}),ht=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py#L192"}}),_t=new v({props:{title:"IFImg2ImgSuperResolutionPipeline",local:"diffusers.IFImg2ImgSuperResolutionPipeline",headingTag:"h2"}}),yt=new Z({props:{name:"class diffusers.IFImg2ImgSuperResolutionPipeline",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L124"}}),Mt=new Z({props:{name:"__call__",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]"},{name:"original_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.8"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 250"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the | |
| process.`,name:"image"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.original_image",description:`<strong>original_image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| The original image that <code>image</code> was varied from.`,name:"original_image"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) — | |
| Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> | |
| will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of | |
| denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will | |
| be maximum and the denoising process will run for the full number of iterations specified in | |
| <code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 250) — | |
| The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L744",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),K=new Kt({props:{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.__call__.example",$$slots:{default:[Ka]},$$scope:{ctx:j}}}),bt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFImg2ImgSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py#L344"}}),It=new v({props:{title:"IFInpaintingPipeline",local:"diffusers.IFInpaintingPipeline",headingTag:"h2"}}),wt=new Z({props:{name:"class diffusers.IFInpaintingPipeline",anchor:"diffusers.IFInpaintingPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L123"}}),Jt=new Z({props:{name:"__call__",anchor:"diffusers.IFInpaintingPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 1.0"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.IFInpaintingPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the | |
| process.`,name:"image"},{anchor:"diffusers.IFInpaintingPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, to mask <code>image</code>. White pixels in the mask will be | |
| repainted, while black pixels will be preserved. If <code>mask_image</code> is a PIL image, it will be converted | |
| to a single channel (luminance) before use. If it’s a tensor, it should contain one color channel (L) | |
| instead of 3, so the expected shape would be <code>(B, H, W, 1)</code>.`,name:"mask_image"},{anchor:"diffusers.IFInpaintingPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> | |
| will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of | |
| denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will | |
| be maximum and the denoising process will run for the full number of iterations specified in | |
| <code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFInpaintingPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFInpaintingPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFInpaintingPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFInpaintingPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFInpaintingPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFInpaintingPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFInpaintingPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFInpaintingPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.IFInpaintingPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L753",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),ee=new Kt({props:{anchor:"diffusers.IFInpaintingPipeline.__call__.example",$$slots:{default:[Oa]},$$scope:{ctx:j}}}),Ut=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFInpaintingPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py#L195"}}),Tt=new v({props:{title:"IFInpaintingSuperResolutionPipeline",local:"diffusers.IFInpaintingSuperResolutionPipeline",headingTag:"h2"}}),Zt=new Z({props:{name:"class diffusers.IFInpaintingSuperResolutionPipeline",anchor:"diffusers.IFInpaintingSuperResolutionPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDPMScheduler"},{name:"image_noising_scheduler",val:": DDPMScheduler"},{name:"safety_checker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.safety_checker.IFSafetyChecker]"},{name:"feature_extractor",val:": typing.Optional[transformers.models.clip.image_processing_clip.CLIPImageProcessor]"},{name:"watermarker",val:": typing.Optional[diffusers.pipelines.deepfloyd_if.watermark.IFWatermarker]"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L126"}}),vt=new Z({props:{name:"__call__",anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]"},{name:"original_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, torch.Tensor, numpy.ndarray, typing.List[PIL.Image.Image], typing.List[torch.Tensor], typing.List[numpy.ndarray]] = None"},{name:"strength",val:": float = 0.8"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 100"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 4.0"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"noise_level",val:": int = 0"},{name:"clean_caption",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, that will be used as the starting point for the | |
| process.`,name:"image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.original_image",description:`<strong>original_image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code>) — | |
| The original image that <code>image</code> was varied from.`,name:"original_image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code>) — | |
| <code>Image</code>, or tensor representing an image batch, to mask <code>image</code>. White pixels in the mask will be | |
| repainted, while black pixels will be preserved. If <code>mask_image</code> is a PIL image, it will be converted | |
| to a single channel (luminance) before use. If it’s a tensor, it should contain one color channel (L) | |
| instead of 3, so the expected shape would be <code>(B, H, W, 1)</code>.`,name:"mask_image"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.8) — | |
| Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> | |
| will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of | |
| denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will | |
| be maximum and the denoising process will run for the full number of iterations specified in | |
| <code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 100) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process. If not defined, equal spaced <code>num_inference_steps</code> | |
| timesteps are used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.noise_level",description:`<strong>noise_level</strong> (<code>int</code>, <em>optional</em>, defaults to 0) — | |
| The amount of noise to add to the upscaled image. Must be in the range <code>[0, 1000)</code>`,name:"noise_level"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L832",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) or watermarked content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.stable_diffusion.IFPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),ne=new Kt({props:{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.__call__.example",$$slots:{default:[el]},$$scope:{ctx:j}}}),jt=new Z({props:{name:"encode_prompt",anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code>. instead. If not defined, one has to pass <code>negative_prompt_embeds</code>. instead. | |
| Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.IFInpaintingSuperResolutionPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (bool, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py#L346"}}),Wt=new qa({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/deepfloyd_if.md"}}),{c(){f=i("meta"),w=a(),b=i("p"),_=a(),r(M.$$.fragment),o=a(),r(I.$$.fragment),tn=a(),P=i("div"),P.innerHTML=Os,nn=a(),r(ae.$$.fragment),sn=a(),le=i("p"),le.textContent=ea,an=a(),oe=i("ul"),oe.innerHTML=ta,ln=a(),r(ie.$$.fragment),on=a(),pe=i("p"),pe.textContent=na,pn=a(),re=i("ol"),re.innerHTML=sa,rn=a(),r(ce.$$.fragment),cn=a(),de=i("p"),de.textContent=aa,dn=a(),r(me.$$.fragment),mn=a(),ge=i("p"),ge.innerHTML=la,gn=a(),ue=i("p"),ue.innerHTML=oa,un=a(),r(fe.$$.fragment),fn=a(),he=i("p"),he.textContent=ia,hn=a(),_e=i("ul"),_e.innerHTML=pa,_n=a(),ye=i("p"),ye.innerHTML=ra,yn=a(),Me=i("ul"),Me.innerHTML=ca,Mn=a(),be=i("p"),be.innerHTML=da,bn=a(),r(Ie.$$.fragment),In=a(),we=i("p"),we.innerHTML=ma,wn=a(),r(Je.$$.fragment),Jn=a(),r(Ue.$$.fragment),Un=a(),Te=i("p"),Te.innerHTML=ga,Tn=a(),Ze=i("p"),Ze.innerHTML=ua,Zn=a(),r(ve.$$.fragment),vn=a(),r(je.$$.fragment),jn=a(),We=i("p"),We.innerHTML=fa,Wn=a(),Be=i("p"),Be.innerHTML=ha,Bn=a(),r(Fe.$$.fragment),Fn=a(),r(Xe.$$.fragment),Xn=a(),ke=i("p"),ke.innerHTML=_a,kn=a(),r(Ge.$$.fragment),Gn=a(),r(Ce.$$.fragment),Cn=a(),Re=i("p"),Re.textContent=ya,Rn=a(),r(Ve.$$.fragment),Vn=a(),xe=i("p"),xe.textContent=Ma,xn=a(),Se=i("p"),Se.innerHTML=ba,Sn=a(),r(Ee.$$.fragment),En=a(),Ne=i("p"),Ne.innerHTML=Ia,Nn=a(),r($e.$$.fragment),$n=a(),Qe=i("p"),Qe.textContent=wa,Qn=a(),r(Ye.$$.fragment),Yn=a(),Pe=i("p"),Pe.innerHTML=Ja,Pn=a(),r(He.$$.fragment),Hn=a(),r(ze.$$.fragment),zn=a(),qe=i("p"),qe.textContent=Ua,qn=a(),Le=i("p"),Le.textContent=Ta,Ln=a(),r(De.$$.fragment),Dn=a(),Ae=i("p"),Ae.textContent=Za,An=a(),r(Ke.$$.fragment),Kn=a(),Oe=i("p"),Oe.textContent=va,On=a(),r(et.$$.fragment),es=a(),tt=i("p"),tt.textContent=ja,ts=a(),r(nt.$$.fragment),ns=a(),r(st.$$.fragment),ss=a(),at=i("table"),at.innerHTML=Wa,as=a(),r(lt.$$.fragment),ls=a(),W=i("div"),r(ot.$$.fragment),Js=a(),C=i("div"),r(it.$$.fragment),Us=a(),Xt=i("p"),Xt.textContent=Ba,Ts=a(),r(H.$$.fragment),Zs=a(),z=i("div"),r(pt.$$.fragment),vs=a(),kt=i("p"),kt.textContent=Fa,os=a(),r(rt.$$.fragment),is=a(),B=i("div"),r(ct.$$.fragment),js=a(),R=i("div"),r(dt.$$.fragment),Ws=a(),Gt=i("p"),Gt.textContent=Xa,Bs=a(),r(q.$$.fragment),Fs=a(),L=i("div"),r(mt.$$.fragment),Xs=a(),Ct=i("p"),Ct.textContent=ka,ps=a(),r(gt.$$.fragment),rs=a(),F=i("div"),r(ut.$$.fragment),ks=a(),V=i("div"),r(ft.$$.fragment),Gs=a(),Rt=i("p"),Rt.textContent=Ga,Cs=a(),r(D.$$.fragment),Rs=a(),A=i("div"),r(ht.$$.fragment),Vs=a(),Vt=i("p"),Vt.textContent=Ca,cs=a(),r(_t.$$.fragment),ds=a(),X=i("div"),r(yt.$$.fragment),xs=a(),x=i("div"),r(Mt.$$.fragment),Ss=a(),xt=i("p"),xt.textContent=Ra,Es=a(),r(K.$$.fragment),Ns=a(),O=i("div"),r(bt.$$.fragment),$s=a(),St=i("p"),St.textContent=Va,ms=a(),r(It.$$.fragment),gs=a(),k=i("div"),r(wt.$$.fragment),Qs=a(),S=i("div"),r(Jt.$$.fragment),Ys=a(),Et=i("p"),Et.textContent=xa,Ps=a(),r(ee.$$.fragment),Hs=a(),te=i("div"),r(Ut.$$.fragment),zs=a(),Nt=i("p"),Nt.textContent=Sa,us=a(),r(Tt.$$.fragment),fs=a(),G=i("div"),r(Zt.$$.fragment),qs=a(),E=i("div"),r(vt.$$.fragment),Ls=a(),$t=i("p"),$t.textContent=Ea,Ds=a(),r(ne.$$.fragment),As=a(),se=i("div"),r(jt.$$.fragment),Ks=a(),Qt=i("p"),Qt.textContent=Na,hs=a(),r(Wt.$$.fragment),_s=a(),en=i("p"),this.h()},l(e){const t=Ha("svelte-u9bgzb",document.head);f=p(t,"META",{name:!0,content:!0}),t.forEach(n),w=l(e),b=p(e,"P",{}),T(b).forEach(n),_=l(e),c(M.$$.fragment,e),o=l(e),c(I.$$.fragment,e),tn=l(e),P=p(e,"DIV",{class:!0,"data-svelte-h":!0}),y(P)!=="svelte-1elo7hh"&&(P.innerHTML=Os),nn=l(e),c(ae.$$.fragment,e),sn=l(e),le=p(e,"P",{"data-svelte-h":!0}),y(le)!=="svelte-1234a12"&&(le.textContent=ea),an=l(e),oe=p(e,"UL",{"data-svelte-h":!0}),y(oe)!=="svelte-1h0q5fq"&&(oe.innerHTML=ta),ln=l(e),c(ie.$$.fragment,e),on=l(e),pe=p(e,"P",{"data-svelte-h":!0}),y(pe)!=="svelte-13gixbp"&&(pe.textContent=na),pn=l(e),re=p(e,"OL",{"data-svelte-h":!0}),y(re)!=="svelte-fe0b7x"&&(re.innerHTML=sa),rn=l(e),c(ce.$$.fragment,e),cn=l(e),de=p(e,"P",{"data-svelte-h":!0}),y(de)!=="svelte-1eqisl"&&(de.textContent=aa),dn=l(e),c(me.$$.fragment,e),mn=l(e),ge=p(e,"P",{"data-svelte-h":!0}),y(ge)!=="svelte-1vhgvls"&&(ge.innerHTML=la),gn=l(e),ue=p(e,"P",{"data-svelte-h":!0}),y(ue)!=="svelte-13wvpdg"&&(ue.innerHTML=oa),un=l(e),c(fe.$$.fragment,e),fn=l(e),he=p(e,"P",{"data-svelte-h":!0}),y(he)!=="svelte-12efsp0"&&(he.textContent=ia),hn=l(e),_e=p(e,"UL",{"data-svelte-h":!0}),y(_e)!=="svelte-1j0zpko"&&(_e.innerHTML=pa),_n=l(e),ye=p(e,"P",{"data-svelte-h":!0}),y(ye)!=="svelte-6cosrd"&&(ye.innerHTML=ra),yn=l(e),Me=p(e,"UL",{"data-svelte-h":!0}),y(Me)!=="svelte-1bclroz"&&(Me.innerHTML=ca),Mn=l(e),be=p(e,"P",{"data-svelte-h":!0}),y(be)!=="svelte-10fuv2z"&&(be.innerHTML=da),bn=l(e),c(Ie.$$.fragment,e),In=l(e),we=p(e,"P",{"data-svelte-h":!0}),y(we)!=="svelte-1yeiafg"&&(we.innerHTML=ma),wn=l(e),c(Je.$$.fragment,e),Jn=l(e),c(Ue.$$.fragment,e),Un=l(e),Te=p(e,"P",{"data-svelte-h":!0}),y(Te)!=="svelte-ork7qq"&&(Te.innerHTML=ga),Tn=l(e),Ze=p(e,"P",{"data-svelte-h":!0}),y(Ze)!=="svelte-1r3ur53"&&(Ze.innerHTML=ua),Zn=l(e),c(ve.$$.fragment,e),vn=l(e),c(je.$$.fragment,e),jn=l(e),We=p(e,"P",{"data-svelte-h":!0}),y(We)!=="svelte-1lgvmye"&&(We.innerHTML=fa),Wn=l(e),Be=p(e,"P",{"data-svelte-h":!0}),y(Be)!=="svelte-8txrxq"&&(Be.innerHTML=ha),Bn=l(e),c(Fe.$$.fragment,e),Fn=l(e),c(Xe.$$.fragment,e),Xn=l(e),ke=p(e,"P",{"data-svelte-h":!0}),y(ke)!=="svelte-yqwmip"&&(ke.innerHTML=_a),kn=l(e),c(Ge.$$.fragment,e),Gn=l(e),c(Ce.$$.fragment,e),Cn=l(e),Re=p(e,"P",{"data-svelte-h":!0}),y(Re)!=="svelte-15gngr3"&&(Re.textContent=ya),Rn=l(e),c(Ve.$$.fragment,e),Vn=l(e),xe=p(e,"P",{"data-svelte-h":!0}),y(xe)!=="svelte-12zfq1b"&&(xe.textContent=Ma),xn=l(e),Se=p(e,"P",{"data-svelte-h":!0}),y(Se)!=="svelte-1w6wjdd"&&(Se.innerHTML=ba),Sn=l(e),c(Ee.$$.fragment,e),En=l(e),Ne=p(e,"P",{"data-svelte-h":!0}),y(Ne)!=="svelte-1t30w48"&&(Ne.innerHTML=Ia),Nn=l(e),c($e.$$.fragment,e),$n=l(e),Qe=p(e,"P",{"data-svelte-h":!0}),y(Qe)!=="svelte-4yc4g0"&&(Qe.textContent=wa),Qn=l(e),c(Ye.$$.fragment,e),Yn=l(e),Pe=p(e,"P",{"data-svelte-h":!0}),y(Pe)!=="svelte-cpkwz5"&&(Pe.innerHTML=Ja),Pn=l(e),c(He.$$.fragment,e),Hn=l(e),c(ze.$$.fragment,e),zn=l(e),qe=p(e,"P",{"data-svelte-h":!0}),y(qe)!=="svelte-klajpi"&&(qe.textContent=Ua),qn=l(e),Le=p(e,"P",{"data-svelte-h":!0}),y(Le)!=="svelte-6d4mux"&&(Le.textContent=Ta),Ln=l(e),c(De.$$.fragment,e),Dn=l(e),Ae=p(e,"P",{"data-svelte-h":!0}),y(Ae)!=="svelte-mhfn46"&&(Ae.textContent=Za),An=l(e),c(Ke.$$.fragment,e),Kn=l(e),Oe=p(e,"P",{"data-svelte-h":!0}),y(Oe)!=="svelte-b2fswh"&&(Oe.textContent=va),On=l(e),c(et.$$.fragment,e),es=l(e),tt=p(e,"P",{"data-svelte-h":!0}),y(tt)!=="svelte-11csze6"&&(tt.textContent=ja),ts=l(e),c(nt.$$.fragment,e),ns=l(e),c(st.$$.fragment,e),ss=l(e),at=p(e,"TABLE",{"data-svelte-h":!0}),y(at)!=="svelte-5xvyn6"&&(at.innerHTML=Wa),as=l(e),c(lt.$$.fragment,e),ls=l(e),W=p(e,"DIV",{class:!0});var N=T(W);c(ot.$$.fragment,N),Js=l(N),C=p(N,"DIV",{class:!0});var $=T(C);c(it.$$.fragment,$),Us=l($),Xt=p($,"P",{"data-svelte-h":!0}),y(Xt)!=="svelte-v78lg8"&&(Xt.textContent=Ba),Ts=l($),c(H.$$.fragment,$),$.forEach(n),Zs=l(N),z=p(N,"DIV",{class:!0});var Bt=T(z);c(pt.$$.fragment,Bt),vs=l(Bt),kt=p(Bt,"P",{"data-svelte-h":!0}),y(kt)!=="svelte-16q0ax1"&&(kt.textContent=Fa),Bt.forEach(n),N.forEach(n),os=l(e),c(rt.$$.fragment,e),is=l(e),B=p(e,"DIV",{class:!0});var Q=T(B);c(ct.$$.fragment,Q),js=l(Q),R=p(Q,"DIV",{class:!0});var Y=T(R);c(dt.$$.fragment,Y),Ws=l(Y),Gt=p(Y,"P",{"data-svelte-h":!0}),y(Gt)!=="svelte-v78lg8"&&(Gt.textContent=Xa),Bs=l(Y),c(q.$$.fragment,Y),Y.forEach(n),Fs=l(Q),L=p(Q,"DIV",{class:!0});var Ft=T(L);c(mt.$$.fragment,Ft),Xs=l(Ft),Ct=p(Ft,"P",{"data-svelte-h":!0}),y(Ct)!=="svelte-16q0ax1"&&(Ct.textContent=ka),Ft.forEach(n),Q.forEach(n),ps=l(e),c(gt.$$.fragment,e),rs=l(e),F=p(e,"DIV",{class:!0});var Yt=T(F);c(ut.$$.fragment,Yt),ks=l(Yt),V=p(Yt,"DIV",{class:!0});var Pt=T(V);c(ft.$$.fragment,Pt),Gs=l(Pt),Rt=p(Pt,"P",{"data-svelte-h":!0}),y(Rt)!=="svelte-v78lg8"&&(Rt.textContent=Ga),Cs=l(Pt),c(D.$$.fragment,Pt),Pt.forEach(n),Rs=l(Yt),A=p(Yt,"DIV",{class:!0});var Ms=T(A);c(ht.$$.fragment,Ms),Vs=l(Ms),Vt=p(Ms,"P",{"data-svelte-h":!0}),y(Vt)!=="svelte-16q0ax1"&&(Vt.textContent=Ca),Ms.forEach(n),Yt.forEach(n),cs=l(e),c(_t.$$.fragment,e),ds=l(e),X=p(e,"DIV",{class:!0});var Ht=T(X);c(yt.$$.fragment,Ht),xs=l(Ht),x=p(Ht,"DIV",{class:!0});var zt=T(x);c(Mt.$$.fragment,zt),Ss=l(zt),xt=p(zt,"P",{"data-svelte-h":!0}),y(xt)!=="svelte-v78lg8"&&(xt.textContent=Ra),Es=l(zt),c(K.$$.fragment,zt),zt.forEach(n),Ns=l(Ht),O=p(Ht,"DIV",{class:!0});var bs=T(O);c(bt.$$.fragment,bs),$s=l(bs),St=p(bs,"P",{"data-svelte-h":!0}),y(St)!=="svelte-16q0ax1"&&(St.textContent=Va),bs.forEach(n),Ht.forEach(n),ms=l(e),c(It.$$.fragment,e),gs=l(e),k=p(e,"DIV",{class:!0});var qt=T(k);c(wt.$$.fragment,qt),Qs=l(qt),S=p(qt,"DIV",{class:!0});var Lt=T(S);c(Jt.$$.fragment,Lt),Ys=l(Lt),Et=p(Lt,"P",{"data-svelte-h":!0}),y(Et)!=="svelte-v78lg8"&&(Et.textContent=xa),Ps=l(Lt),c(ee.$$.fragment,Lt),Lt.forEach(n),Hs=l(qt),te=p(qt,"DIV",{class:!0});var Is=T(te);c(Ut.$$.fragment,Is),zs=l(Is),Nt=p(Is,"P",{"data-svelte-h":!0}),y(Nt)!=="svelte-16q0ax1"&&(Nt.textContent=Sa),Is.forEach(n),qt.forEach(n),us=l(e),c(Tt.$$.fragment,e),fs=l(e),G=p(e,"DIV",{class:!0});var Dt=T(G);c(Zt.$$.fragment,Dt),qs=l(Dt),E=p(Dt,"DIV",{class:!0});var At=T(E);c(vt.$$.fragment,At),Ls=l(At),$t=p(At,"P",{"data-svelte-h":!0}),y($t)!=="svelte-v78lg8"&&($t.textContent=Ea),Ds=l(At),c(ne.$$.fragment,At),At.forEach(n),As=l(Dt),se=p(Dt,"DIV",{class:!0});var ws=T(se);c(jt.$$.fragment,ws),Ks=l(ws),Qt=p(ws,"P",{"data-svelte-h":!0}),y(Qt)!=="svelte-16q0ax1"&&(Qt.textContent=Na),ws.forEach(n),Dt.forEach(n),hs=l(e),c(Wt.$$.fragment,e),_s=l(e),en=p(e,"P",{}),T(en).forEach(n),this.h()},h(){U(f,"name","hf:doc:metadata"),U(f,"content",nl),U(P,"class","flex flex-wrap space-x-1"),U(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){h(document.head,f),s(e,w,t),s(e,b,t),s(e,_,t),d(M,e,t),s(e,o,t),d(I,e,t),s(e,tn,t),s(e,P,t),s(e,nn,t),d(ae,e,t),s(e,sn,t),s(e,le,t),s(e,an,t),s(e,oe,t),s(e,ln,t),d(ie,e,t),s(e,on,t),s(e,pe,t),s(e,pn,t),s(e,re,t),s(e,rn,t),d(ce,e,t),s(e,cn,t),s(e,de,t),s(e,dn,t),d(me,e,t),s(e,mn,t),s(e,ge,t),s(e,gn,t),s(e,ue,t),s(e,un,t),d(fe,e,t),s(e,fn,t),s(e,he,t),s(e,hn,t),s(e,_e,t),s(e,_n,t),s(e,ye,t),s(e,yn,t),s(e,Me,t),s(e,Mn,t),s(e,be,t),s(e,bn,t),d(Ie,e,t),s(e,In,t),s(e,we,t),s(e,wn,t),d(Je,e,t),s(e,Jn,t),d(Ue,e,t),s(e,Un,t),s(e,Te,t),s(e,Tn,t),s(e,Ze,t),s(e,Zn,t),d(ve,e,t),s(e,vn,t),d(je,e,t),s(e,jn,t),s(e,We,t),s(e,Wn,t),s(e,Be,t),s(e,Bn,t),d(Fe,e,t),s(e,Fn,t),d(Xe,e,t),s(e,Xn,t),s(e,ke,t),s(e,kn,t),d(Ge,e,t),s(e,Gn,t),d(Ce,e,t),s(e,Cn,t),s(e,Re,t),s(e,Rn,t),d(Ve,e,t),s(e,Vn,t),s(e,xe,t),s(e,xn,t),s(e,Se,t),s(e,Sn,t),d(Ee,e,t),s(e,En,t),s(e,Ne,t),s(e,Nn,t),d($e,e,t),s(e,$n,t),s(e,Qe,t),s(e,Qn,t),d(Ye,e,t),s(e,Yn,t),s(e,Pe,t),s(e,Pn,t),d(He,e,t),s(e,Hn,t),d(ze,e,t),s(e,zn,t),s(e,qe,t),s(e,qn,t),s(e,Le,t),s(e,Ln,t),d(De,e,t),s(e,Dn,t),s(e,Ae,t),s(e,An,t),d(Ke,e,t),s(e,Kn,t),s(e,Oe,t),s(e,On,t),d(et,e,t),s(e,es,t),s(e,tt,t),s(e,ts,t),d(nt,e,t),s(e,ns,t),d(st,e,t),s(e,ss,t),s(e,at,t),s(e,as,t),d(lt,e,t),s(e,ls,t),s(e,W,t),d(ot,W,null),h(W,Js),h(W,C),d(it,C,null),h(C,Us),h(C,Xt),h(C,Ts),d(H,C,null),h(W,Zs),h(W,z),d(pt,z,null),h(z,vs),h(z,kt),s(e,os,t),d(rt,e,t),s(e,is,t),s(e,B,t),d(ct,B,null),h(B,js),h(B,R),d(dt,R,null),h(R,Ws),h(R,Gt),h(R,Bs),d(q,R,null),h(B,Fs),h(B,L),d(mt,L,null),h(L,Xs),h(L,Ct),s(e,ps,t),d(gt,e,t),s(e,rs,t),s(e,F,t),d(ut,F,null),h(F,ks),h(F,V),d(ft,V,null),h(V,Gs),h(V,Rt),h(V,Cs),d(D,V,null),h(F,Rs),h(F,A),d(ht,A,null),h(A,Vs),h(A,Vt),s(e,cs,t),d(_t,e,t),s(e,ds,t),s(e,X,t),d(yt,X,null),h(X,xs),h(X,x),d(Mt,x,null),h(x,Ss),h(x,xt),h(x,Es),d(K,x,null),h(X,Ns),h(X,O),d(bt,O,null),h(O,$s),h(O,St),s(e,ms,t),d(It,e,t),s(e,gs,t),s(e,k,t),d(wt,k,null),h(k,Qs),h(k,S),d(Jt,S,null),h(S,Ys),h(S,Et),h(S,Ps),d(ee,S,null),h(k,Hs),h(k,te),d(Ut,te,null),h(te,zs),h(te,Nt),s(e,us,t),d(Tt,e,t),s(e,fs,t),s(e,G,t),d(Zt,G,null),h(G,qs),h(G,E),d(vt,E,null),h(E,Ls),h(E,$t),h(E,Ds),d(ne,E,null),h(G,As),h(G,se),d(jt,se,null),h(se,Ks),h(se,Qt),s(e,hs,t),d(Wt,e,t),s(e,_s,t),s(e,en,t),ys=!0},p(e,[t]){const N={};t&2&&(N.$$scope={dirty:t,ctx:e}),H.$set(N);const $={};t&2&&($.$$scope={dirty:t,ctx:e}),q.$set($);const Bt={};t&2&&(Bt.$$scope={dirty:t,ctx:e}),D.$set(Bt);const Q={};t&2&&(Q.$$scope={dirty:t,ctx:e}),K.$set(Q);const Y={};t&2&&(Y.$$scope={dirty:t,ctx:e}),ee.$set(Y);const Ft={};t&2&&(Ft.$$scope={dirty:t,ctx:e}),ne.$set(Ft)},i(e){ys||(m(M.$$.fragment,e),m(I.$$.fragment,e),m(ae.$$.fragment,e),m(ie.$$.fragment,e),m(ce.$$.fragment,e),m(me.$$.fragment,e),m(fe.$$.fragment,e),m(Ie.$$.fragment,e),m(Je.$$.fragment,e),m(Ue.$$.fragment,e),m(ve.$$.fragment,e),m(je.$$.fragment,e),m(Fe.$$.fragment,e),m(Xe.$$.fragment,e),m(Ge.$$.fragment,e),m(Ce.$$.fragment,e),m(Ve.$$.fragment,e),m(Ee.$$.fragment,e),m($e.$$.fragment,e),m(Ye.$$.fragment,e),m(He.$$.fragment,e),m(ze.$$.fragment,e),m(De.$$.fragment,e),m(Ke.$$.fragment,e),m(et.$$.fragment,e),m(nt.$$.fragment,e),m(st.$$.fragment,e),m(lt.$$.fragment,e),m(ot.$$.fragment,e),m(it.$$.fragment,e),m(H.$$.fragment,e),m(pt.$$.fragment,e),m(rt.$$.fragment,e),m(ct.$$.fragment,e),m(dt.$$.fragment,e),m(q.$$.fragment,e),m(mt.$$.fragment,e),m(gt.$$.fragment,e),m(ut.$$.fragment,e),m(ft.$$.fragment,e),m(D.$$.fragment,e),m(ht.$$.fragment,e),m(_t.$$.fragment,e),m(yt.$$.fragment,e),m(Mt.$$.fragment,e),m(K.$$.fragment,e),m(bt.$$.fragment,e),m(It.$$.fragment,e),m(wt.$$.fragment,e),m(Jt.$$.fragment,e),m(ee.$$.fragment,e),m(Ut.$$.fragment,e),m(Tt.$$.fragment,e),m(Zt.$$.fragment,e),m(vt.$$.fragment,e),m(ne.$$.fragment,e),m(jt.$$.fragment,e),m(Wt.$$.fragment,e),ys=!0)},o(e){g(M.$$.fragment,e),g(I.$$.fragment,e),g(ae.$$.fragment,e),g(ie.$$.fragment,e),g(ce.$$.fragment,e),g(me.$$.fragment,e),g(fe.$$.fragment,e),g(Ie.$$.fragment,e),g(Je.$$.fragment,e),g(Ue.$$.fragment,e),g(ve.$$.fragment,e),g(je.$$.fragment,e),g(Fe.$$.fragment,e),g(Xe.$$.fragment,e),g(Ge.$$.fragment,e),g(Ce.$$.fragment,e),g(Ve.$$.fragment,e),g(Ee.$$.fragment,e),g($e.$$.fragment,e),g(Ye.$$.fragment,e),g(He.$$.fragment,e),g(ze.$$.fragment,e),g(De.$$.fragment,e),g(Ke.$$.fragment,e),g(et.$$.fragment,e),g(nt.$$.fragment,e),g(st.$$.fragment,e),g(lt.$$.fragment,e),g(ot.$$.fragment,e),g(it.$$.fragment,e),g(H.$$.fragment,e),g(pt.$$.fragment,e),g(rt.$$.fragment,e),g(ct.$$.fragment,e),g(dt.$$.fragment,e),g(q.$$.fragment,e),g(mt.$$.fragment,e),g(gt.$$.fragment,e),g(ut.$$.fragment,e),g(ft.$$.fragment,e),g(D.$$.fragment,e),g(ht.$$.fragment,e),g(_t.$$.fragment,e),g(yt.$$.fragment,e),g(Mt.$$.fragment,e),g(K.$$.fragment,e),g(bt.$$.fragment,e),g(It.$$.fragment,e),g(wt.$$.fragment,e),g(Jt.$$.fragment,e),g(ee.$$.fragment,e),g(Ut.$$.fragment,e),g(Tt.$$.fragment,e),g(Zt.$$.fragment,e),g(vt.$$.fragment,e),g(ne.$$.fragment,e),g(jt.$$.fragment,e),g(Wt.$$.fragment,e),ys=!1},d(e){e&&(n(w),n(b),n(_),n(o),n(tn),n(P),n(nn),n(sn),n(le),n(an),n(oe),n(ln),n(on),n(pe),n(pn),n(re),n(rn),n(cn),n(de),n(dn),n(mn),n(ge),n(gn),n(ue),n(un),n(fn),n(he),n(hn),n(_e),n(_n),n(ye),n(yn),n(Me),n(Mn),n(be),n(bn),n(In),n(we),n(wn),n(Jn),n(Un),n(Te),n(Tn),n(Ze),n(Zn),n(vn),n(jn),n(We),n(Wn),n(Be),n(Bn),n(Fn),n(Xn),n(ke),n(kn),n(Gn),n(Cn),n(Re),n(Rn),n(Vn),n(xe),n(xn),n(Se),n(Sn),n(En),n(Ne),n(Nn),n($n),n(Qe),n(Qn),n(Yn),n(Pe),n(Pn),n(Hn),n(zn),n(qe),n(qn),n(Le),n(Ln),n(Dn),n(Ae),n(An),n(Kn),n(Oe),n(On),n(es),n(tt),n(ts),n(ns),n(ss),n(at),n(as),n(ls),n(W),n(os),n(is),n(B),n(ps),n(rs),n(F),n(cs),n(ds),n(X),n(ms),n(gs),n(k),n(us),n(fs),n(G),n(hs),n(_s),n(en)),n(f),u(M,e),u(I,e),u(ae,e),u(ie,e),u(ce,e),u(me,e),u(fe,e),u(Ie,e),u(Je,e),u(Ue,e),u(ve,e),u(je,e),u(Fe,e),u(Xe,e),u(Ge,e),u(Ce,e),u(Ve,e),u(Ee,e),u($e,e),u(Ye,e),u(He,e),u(ze,e),u(De,e),u(Ke,e),u(et,e),u(nt,e),u(st,e),u(lt,e),u(ot),u(it),u(H),u(pt),u(rt,e),u(ct),u(dt),u(q),u(mt),u(gt,e),u(ut),u(ft),u(D),u(ht),u(_t,e),u(yt),u(Mt),u(K),u(bt),u(It,e),u(wt),u(Jt),u(ee),u(Ut),u(Tt,e),u(Zt),u(vt),u(ne),u(jt),u(Wt,e)}}}const nl='{"title":"DeepFloyd IF","local":"deepfloyd-if","sections":[{"title":"Overview","local":"overview","sections":[],"depth":2},{"title":"Usage","local":"usage","sections":[{"title":"Text-to-Image Generation","local":"text-to-image-generation","sections":[],"depth":3},{"title":"Text Guided Image-to-Image Generation","local":"text-guided-image-to-image-generation","sections":[],"depth":3},{"title":"Text Guided Inpainting Generation","local":"text-guided-inpainting-generation","sections":[],"depth":3},{"title":"Converting between different pipelines","local":"converting-between-different-pipelines","sections":[],"depth":3},{"title":"Optimizing for speed","local":"optimizing-for-speed","sections":[],"depth":3},{"title":"Optimizing for memory","local":"optimizing-for-memory","sections":[],"depth":3}],"depth":2},{"title":"Available Pipelines:","local":"available-pipelines","sections":[],"depth":2},{"title":"IFPipeline","local":"diffusers.IFPipeline","sections":[],"depth":2},{"title":"IFSuperResolutionPipeline","local":"diffusers.IFSuperResolutionPipeline","sections":[],"depth":2},{"title":"IFImg2ImgPipeline","local":"diffusers.IFImg2ImgPipeline","sections":[],"depth":2},{"title":"IFImg2ImgSuperResolutionPipeline","local":"diffusers.IFImg2ImgSuperResolutionPipeline","sections":[],"depth":2},{"title":"IFInpaintingPipeline","local":"diffusers.IFInpaintingPipeline","sections":[],"depth":2},{"title":"IFInpaintingSuperResolutionPipeline","local":"diffusers.IFInpaintingSuperResolutionPipeline","sections":[],"depth":2}],"depth":1}';function sl(j){return Qa(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class dl extends Ya{constructor(f){super(),Pa(this,f,sl,tl,$a,{})}}export{dl as component}; | |
Xet Storage Details
- Size:
- 197 kB
- Xet hash:
- 2b4b1ff65b26ba8739fb586e9f475f29a84912f38a6fddd11a280735107e1906
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.