# Depth-to-image

The Stable Diffusion model can also infer depth based on an image using [MiDaS](https://github.com/isl-org/MiDaS). This allows you to pass a text prompt and an initial image to condition the generation of new images as well as a `depth_map` to preserve the image structure.

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!

</Tip>

## StableDiffusionDepth2ImgPipeline

```py
class diffusers.StableDiffusionDepth2ImgPipeline(
    vae: AutoencoderKL,
    text_encoder: CLIPTextModel,
    tokenizer: CLIPTokenizer,
    unet: UNet2DConditionModel,
    scheduler: KarrasDiffusionSchedulers,
    depth_estimator: DPTForDepthEstimation,
    feature_extractor: DPTImageProcessor,
)
```

Parameters:

- **vae** ([AutoencoderKL](/docs/diffusers/main/en/api/models/autoencoderkl#diffusers.AutoencoderKL)): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** ([CLIPTextModel](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel)): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
- **tokenizer** ([CLIPTokenizer](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer)): A `CLIPTokenizer` to tokenize text.
- **unet** ([UNet2DConditionModel](/docs/diffusers/main/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel)): A `UNet2DConditionModel` to denoise the encoded image latents.
- **scheduler** ([SchedulerMixin](/docs/diffusers/main/en/api/schedulers/overview#diffusers.SchedulerMixin)): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [DDIMScheduler](/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler), [LMSDiscreteScheduler](/docs/diffusers/main/en/api/schedulers/lms_discrete#diffusers.LMSDiscreteScheduler), or [PNDMScheduler](/docs/diffusers/main/en/api/schedulers/pndm#diffusers.PNDMScheduler).

Pipeline for text-guided depth-based image-to-image generation using Stable Diffusion.

This model inherits from [DiffusionPipeline](/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline). Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).

The pipeline also inherits the following loading methods:

- [load_textual_inversion()](/docs/diffusers/main/en/api/loaders/textual_inversion#diffusers.loaders.TextualInversionLoaderMixin.load_textual_inversion) for loading textual inversion embeddings
- [load_lora_weights()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights) for loading LoRA weights
- [save_lora_weights()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights) for saving LoRA weights

### `__call__`

```py
__call__(
    prompt=None,
    image=None,
    depth_map=None,
    strength=0.8,
    num_inference_steps=50,
    guidance_scale=7.5,
    negative_prompt=None,
    num_images_per_prompt=1,
    eta=0.0,
    generator=None,
    prompt_embeds=None,
    negative_prompt_embeds=None,
    output_type="pil",
    return_dict=True,
    cross_attention_kwargs=None,
    clip_skip=None,
    callback_on_step_end=None,
    callback_on_step_end_tensor_inputs=["latents"],
    **kwargs,
)
```

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be used as the starting point. Can accept image latents as `image` only if `depth_map` is not `None`.
- **depth_map** (`torch.Tensor`, *optional*): Depth prediction to be used as additional conditioning for the image generation process. If not defined, it automatically predicts the depth with `self.depth_estimator`.
- **strength** (`float`, *optional*, defaults to 0.8): Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point, and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`.
- **num_inference_steps** (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`.
- **guidance_scale** (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **negative_prompt** (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
- **eta** (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [DDIMScheduler](/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler), and is ignored in other schedulers.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`): Whether or not to return a [StableDiffusionPipelineOutput](#stablediffusionpipelineoutput) instead of a plain tuple.
- **cross_attention_kwargs** (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clip_skip** (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
- **callback_on_step_end** (`Callable`, *optional*): A function called at the end of each denoising step during inference with the arguments `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.

Returns: [StableDiffusionPipelineOutput](#stablediffusionpipelineoutput) or `tuple`. If `return_dict` is `True`, [StableDiffusionPipelineOutput](#stablediffusionpipelineoutput) is returned; otherwise a `tuple` is returned, where the first element is a list with the generated images.

The call function to the pipeline for generation.

Examples:

```py
>>> import torch
>>> import requests
>>> from PIL import Image

>>> from diffusers import StableDiffusionDepth2ImgPipeline

>>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-2-depth",
...     torch_dtype=torch.float16,
... )
>>> pipe.to("cuda")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> init_image = Image.open(requests.get(url, stream=True).raw)
>>> prompt = "two tigers"
>>> n_prompt = "bad, deformed, ugly, bad anatomy"
>>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
```
### `enable_attention_slicing`

```py
enable_attention_slicing(slice_size="auto")
```

Parameters:

- **slice_size** (`str` or `int`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention is computed in two steps. If `"max"`, the maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`.

Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. For more than one attention head, the computation is performed sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.

<Tip warning={true}>

⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. These attention computations are already very memory efficient, so you won't need to enable this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slowdowns!

</Tip>

Examples:

```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline

>>> pipe = StableDiffusionPipeline.from_pretrained(
...     "runwayml/stable-diffusion-v1-5",
...     torch_dtype=torch.float16,
...     use_safetensors=True,
... )
>>> pipe = pipe.to("cuda")  # float16 weights need a GPU device

>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> pipe.enable_attention_slicing()
>>> image = pipe(prompt).images[0]
```
### `disable_attention_slicing`

```py
disable_attention_slicing()
```

Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is computed in one step.

### `enable_xformers_memory_efficient_attention`

```py
enable_xformers_memory_efficient_attention(attention_op=None)
```

Parameters:

- **attention_op** (`Callable`, *optional*): Override the default `None` operator for use as the `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers.

Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed.

<Tip warning={true}>

⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedence.

</Tip>

Examples:

```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

>>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
>>> # Workaround: Flash Attention does not accept the VAE's attention shape,
>>> # so let xFormers choose the operator for the VAE.
>>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
```

### `disable_xformers_memory_efficient_attention`

```py
disable_xformers_memory_efficient_attention()
```

Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
| <span class="hljs-keyword">import</span> torch | |
| model_id = <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span> | |
| pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_textual_inversion(<span class="hljs-string">"sd-concepts-library/cat-toy"</span>) | |
| prompt = <span class="hljs-string">"A <cat-toy> backpack"</span> | |
| image = pipe(prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"cat-backpack.png"</span>)`,wrap:!1}}),{c(){n=d("p"),n.textContent=b,r=i(),v(s.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),g(n)!=="svelte-1gc783q"&&(n.textContent=b),r=a(t),w(s.$$.fragment,t)},m(t,u){_(t,n,u),_(t,r,u),y(s,t,u),f=!0},p:O,i(t){f||(x(s.$$.fragment,t),f=!0)},o(t){$(s.$$.fragment,t),f=!1},d(t){t&&(l(n),l(r)),D(s,t)}}}function un(M){let n,b="locally:",r,s,f;return s=new We({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZChtb2RlbF9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNikudG8oJTIyY3VkYSUyMiklMEElMEFwaXBlLmxvYWRfdGV4dHVhbF9pbnZlcnNpb24oJTIyLiUyRmNoYXJ0dXJuZXJ2Mi5wdCUyMiUyQyUyMHRva2VuJTNEJTIyY2hhcnR1cm5lcnYyJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmNoYXJ0dXJuZXJ2MiUyQyUyMG11bHRpcGxlJTIwdmlld3MlMjBvZiUyMHRoZSUyMHNhbWUlMjBjaGFyYWN0ZXIlMjBpbiUyMHRoZSUyMHNhbWUlMjBvdXRmaXQlMkMlMjBhJTIwY2hhcmFjdGVyJTIwdHVybmFyb3VuZCUyMG9mJTIwYSUyMHdvbWFuJTIwd2VhcmluZyUyMGElMjBibGFjayUyMGphY2tldCUyMGFuZCUyMHJlZCUyMHNoaXJ0JTJDJTIwYmVzdCUyMHF1YWxpdHklMkMlMjBpbnRyaWNhdGUlMjBkZXRhaWxzLiUyMiUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTApLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmNoYXJhY3Rlci5wbmclMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| model_id = <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span> | |
| pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_textual_inversion(<span class="hljs-string">"./charturnerv2.pt"</span>, token=<span class="hljs-string">"charturnerv2"</span>) | |
| prompt = <span class="hljs-string">"charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."</span> | |
| image = pipe(prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"character.png"</span>)`,wrap:!1}}),{c(){n=d("p"),n.textContent=b,r=i(),v(s.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),g(n)!=="svelte-4c75kq"&&(n.textContent=b),r=a(t),w(s.$$.fragment,t)},m(t,u){_(t,n,u),_(t,r,u),y(s,t,u),f=!0},p:O,i(t){f||(x(s.$$.fragment,t),f=!0)},o(t){$(s.$$.fragment,t),f=!1},d(t){t&&(l(n),l(r)),D(s,t)}}}function hn(M){let n,b,r,s,f,t,u,Ct='The Stable Diffusion model can also infer depth based on an image using <a href="https://github.com/isl-org/MiDaS" rel="nofollow">MiDaS</a>. This allows you to pass a text prompt and an initial image to condition the generation of new images as well as a <code>depth_map</code> to preserve the image structure.',Ge,R,Ne,K,Re,m,ee,qe,he,jt="Pipeline for text-guided depth-based image-to-image generation using Stable Diffusion.",Ae,ge,Zt=`This model inherits from <a href="/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods | |
### `load_lora_weights`

```py
load_lora_weights(pretrained_model_name_or_path_or_dict, adapter_name=None, **kwargs)
```

Parameters:

- **pretrained_model_name_or_path_or_dict** (`str` or `os.PathLike` or `dict`): See [lora_state_dict()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict).
- **adapter_name** (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}`, where `i` is the total number of adapters being loaded.
- **kwargs** (`dict`, *optional*): See [lora_state_dict()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict).

Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`.

All kwargs are forwarded to `self.lora_state_dict`.

See [lora_state_dict()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict) for more details on how the state dict is loaded.

See [load_lora_into_unet()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet) for more details on how the state dict is loaded into `self.unet`.

See [load_lora_into_text_encoder()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder) for more details on how the state dict is loaded into `self.text_encoder`.
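This page doesn't ship an official example for `load_lora_weights`, so here is a minimal hedged sketch. The repository id `some-user/depth2img-lora` and the weight file name are hypothetical placeholders, not a real checkpoint:

```py
>>> import torch
>>> from diffusers import StableDiffusionDepth2ImgPipeline

>>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
... ).to("cuda")

>>> # Hypothetical LoRA repository and file name, shown only to illustrate the call.
>>> pipe.load_lora_weights(
...     "some-user/depth2img-lora",
...     weight_name="pytorch_lora_weights.safetensors",
...     adapter_name="style",
... )
```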
### `save_lora_weights`

```py
save_lora_weights(
    save_directory,
    unet_lora_layers=None,
    text_encoder_lora_layers=None,
    is_main_process=True,
    weight_name=None,
    save_function=None,
    safe_serialization=True,
)
```

Parameters:

- **save_directory** (`str` or `os.PathLike`): Directory to save LoRA parameters to. Will be created if it doesn't exist.
- **unet_lora_layers** (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `unet`.
- **text_encoder_lora_layers** (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers.
- **is_main_process** (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training when you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions.
- **save_function** (`Callable`): The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`.
- **safe_serialization** (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.

Save the LoRA parameters corresponding to the UNet and text encoder.
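A hedged sketch of the save side. It assumes you already hold LoRA state dicts from your own training code; `unet_lora_state_dict` and `text_encoder_lora_state_dict` are placeholder variables, not values this page defines:

```py
>>> from diffusers import StableDiffusionDepth2ImgPipeline

>>> # unet_lora_state_dict / text_encoder_lora_state_dict are placeholders
>>> # standing in for LoRA layers produced by a training loop.
>>> StableDiffusionDepth2ImgPipeline.save_lora_weights(
...     save_directory="./my-depth2img-lora",
...     unet_lora_layers=unet_lora_state_dict,
...     text_encoder_lora_layers=text_encoder_lora_state_dict,
...     safe_serialization=True,
... )
```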
### `encode_prompt`

```py
encode_prompt(
    prompt,
    device,
    num_images_per_prompt,
    do_classifier_free_guidance,
    negative_prompt=None,
    prompt_embeds=None,
    negative_prompt_embeds=None,
    lora_scale=None,
    clip_skip=None,
)
```

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): Prompt to be encoded.
- **device** (`torch.device`): Torch device.
- **num_images_per_prompt** (`int`): Number of images that should be generated per prompt.
- **do_classifier_free_guidance** (`bool`): Whether to use classifier-free guidance or not.
- **negative_prompt** (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
- **lora_scale** (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- **clip_skip** (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.

Encodes the prompt into text encoder hidden states.
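A hedged sketch of pre-computing embeddings once and reusing them through `prompt_embeds` and `negative_prompt_embeds`. It assumes `pipe` and `init_image` from the `__call__` example above and that `encode_prompt` returns the positive and negative embeddings in that order:

```py
>>> prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
...     prompt="two tigers",
...     device="cuda",
...     num_images_per_prompt=1,
...     do_classifier_free_guidance=True,
...     negative_prompt="bad, deformed, ugly, bad anatomy",
... )

>>> # Reuse the cached embeddings across calls instead of re-encoding the text.
>>> image = pipe(
...     prompt_embeds=prompt_embeds,
...     negative_prompt_embeds=negative_prompt_embeds,
...     image=init_image,
...     strength=0.7,
... ).images[0]
```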
## StableDiffusionPipelineOutput

```py
class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput(
    images: Union[List[PIL.Image.Image], np.ndarray],
    nsfw_content_detected: Optional[List[bool]],
)
```

Parameters:

- **images** (`List[PIL.Image.Image]` or `np.ndarray`): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`.
- **nsfw_content_detected** (`List[bool]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content, or `None` if safety checking could not be performed.

Output class for Stable Diffusion pipelines.