# Super-resolution

The Stable Diffusion upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/). It is used to enhance the resolution of input images by a factor of 4.

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!

</Tip>

## StableDiffusionUpscalePipeline

`class diffusers.StableDiffusionUpscalePipeline(vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: KarrasDiffusionSchedulers, safety_checker: Optional = None, feature_extractor: Optional = None, watermarker: Optional = None, max_noise_level: int = 350)`

Pipeline for text-guided image super-resolution using Stable Diffusion 2.

This model inherits from [DiffusionPipeline](/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline). Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).

The pipeline also inherits the following loading methods:

- [load_textual_inversion()](/docs/diffusers/main/en/api/loaders/textual_inversion#diffusers.loaders.TextualInversionLoaderMixin.load_textual_inversion) for loading textual inversion embeddings
- [load_lora_weights()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights) for loading LoRA weights
- [save_lora_weights()](/docs/diffusers/main/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights) for saving LoRA weights
- [from_single_file()](/docs/diffusers/main/en/api/loaders/single_file#diffusers.loaders.FromSingleFileMixin.from_single_file) for loading `.ckpt` files

Parameters:

- **vae** ([AutoencoderKL](/docs/diffusers/main/en/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** ([CLIPTextModel](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel)) — Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
- **tokenizer** ([CLIPTokenizer](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer)) — A `CLIPTokenizer` to tokenize text.
- **unet** ([UNet2DConditionModel](/docs/diffusers/main/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel)) — A `UNet2DConditionModel` to denoise the encoded image latents.
- **low_res_scheduler** ([SchedulerMixin](/docs/diffusers/main/en/api/schedulers/overview#diffusers.SchedulerMixin)) — A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of [DDPMScheduler](/docs/diffusers/main/en/api/schedulers/ddpm#diffusers.DDPMScheduler).
- **scheduler** ([SchedulerMixin](/docs/diffusers/main/en/api/schedulers/overview#diffusers.SchedulerMixin)) — A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [DDIMScheduler](/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler), [LMSDiscreteScheduler](/docs/diffusers/main/en/api/schedulers/lms_discrete#diffusers.LMSDiscreteScheduler), or [PNDMScheduler](/docs/diffusers/main/en/api/schedulers/pndm#diffusers.PNDMScheduler).

### `__call__`

`__call__(prompt: Union = None, image: Union = None, num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 20, negative_prompt: Union = None, num_images_per_prompt: Optional = 1, eta: float = 0.0, generator: Union = None, latents: Optional = None, prompt_embeds: Optional = None, negative_prompt_embeds: Optional = None, output_type: Optional = 'pil', return_dict: bool = True, callback: Optional = None, callback_steps: int = 1, cross_attention_kwargs: Optional = None, clip_skip: int = None)`

The call function to the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — `Image` or tensor representing an image batch to be upscaled.
- **num_inference_steps** (`int`, *optional*, defaults to 75) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **guidance_scale** (`float`, *optional*, defaults to 9.0) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **eta** (`float`, *optional*, defaults to 0.0) — Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [DDIMScheduler](/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler), and is ignored in other schedulers.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a [StableDiffusionPipelineOutput](/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) instead of a plain tuple.
- **callback** (`Callable`, *optional*) — A function that is called every `callback_steps` steps during inference with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
- **callback_steps** (`int`, *optional*, defaults to 1) — The frequency at which the `callback` function is called. If not specified, the callback is called at every step.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.

Returns: [StableDiffusionPipelineOutput](/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) or `tuple` — If `return_dict` is `True`, a [StableDiffusionPipelineOutput](/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) is returned; otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.

Examples:

```python
>>> import requests
>>> from PIL import Image
>>> from io import BytesIO
>>> from diffusers import StableDiffusionUpscalePipeline
>>> import torch

>>> # load model and scheduler
>>> model_id = "stabilityai/stable-diffusion-x4-upscaler"
>>> pipeline = StableDiffusionUpscalePipeline.from_pretrained(
...     model_id, variant="fp16", torch_dtype=torch.float16
... )
>>> pipeline = pipeline.to("cuda")

>>> # let's download an image
>>> url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
>>> response = requests.get(url)
>>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB")
>>> low_res_img = low_res_img.resize((128, 128))
>>> prompt = "a white cat"

>>> upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
>>> upscaled_image.save("upsampled_cat.png")
```
### `enable_attention_slicing`

`enable_attention_slicing(slice_size: Union = 'auto')`

Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. For more than one attention head, the computation is performed sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.

Parameters:

- **slice_size** (`str` or `int`, *optional*, defaults to `"auto"`) — When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maximum memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`.

<Tip warning={true}>

⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. These attention computations are already very memory efficient, so you won't need to enable this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slowdowns!

</Tip>

Examples:

```python
>>> import torch
>>> from diffusers import StableDiffusionPipeline

>>> pipe = StableDiffusionPipeline.from_pretrained(
...     "runwayml/stable-diffusion-v1-5",
...     torch_dtype=torch.float16,
...     use_safetensors=True,
... )
>>> pipe = pipe.to("cuda")  # fp16 weights need a GPU to run

>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> pipe.enable_attention_slicing()
>>> image = pipe(prompt).images[0]
```
### `disable_attention_slicing`

`disable_attention_slicing()`

Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is computed in one step.

### `enable_xformers_memory_efficient_attention`

`enable_xformers_memory_efficient_attention(attention_op: Optional = None)`

Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed.

Parameters:

- **attention_op** (`Callable`, *optional*) — Override the default `None` operator for use as the `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers.

<Tip warning={true}>

⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedence.

</Tip>

Examples:

```python
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

>>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
>>> # Workaround: flash attention does not accept the VAE's attention shape
>>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
```
### `disable_xformers_memory_efficient_attention`

`disable_xformers_memory_efficient_attention()`

Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).

### `encode_prompt`

`encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt = None, prompt_embeds: Optional = None, negative_prompt_embeds: Optional = None, lora_scale: Optional = None, clip_skip: Optional = None)`

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — Prompt to be encoded.
- **device** (`torch.device`) — Torch device.
- **num_images_per_prompt** (`int`) — Number of images that should be generated per prompt.
- **do_classifier_free_guidance** (`bool`) — Whether to use classifier-free guidance or not.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
- **lora_scale** (`float`, *optional*) — A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
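`encode_prompt()` can also be called on its own to precompute embeddings that are then fed back through `prompt_embeds`/`negative_prompt_embeds`. A minimal sketch, continuing the upscale example above and assuming the method returns the `(prompt_embeds, negative_prompt_embeds)` pair:

```python
>>> prompt_embeds, negative_prompt_embeds = pipeline.encode_prompt(
...     prompt="a white cat",
...     device=torch.device("cuda"),
...     num_images_per_prompt=1,
...     do_classifier_free_guidance=True,
...     negative_prompt="blurry, low quality",
... )
>>> upscaled_image = pipeline(
...     prompt_embeds=prompt_embeds,
...     negative_prompt_embeds=negative_prompt_embeds,
...     image=low_res_img,
... ).images[0]
```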
## StableDiffusionPipelineOutput

`class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput(images: Union, nsfw_content_detected: Optional)`

Output class for Stable Diffusion pipelines.

Parameters:

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) — List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`.
- **nsfw_content_detected** (`List[bool]`) — List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content, or `None` if safety checking could not be performed.
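Since the pipeline returns this class by default (`return_dict=True`), both fields can be read straight off the call result; with `return_dict=False` the same values come back as a plain tuple, as documented in the `__call__` return description above. A short sketch, reusing `pipeline` and `low_res_img` from the upscale example:

```python
>>> output = pipeline(prompt="a white cat", image=low_res_img)
>>> image = output.images[0]
>>> output.nsfw_content_detected  # e.g. [False], or None if no safety checker was loaded
>>> images, nsfw_flags = pipeline(prompt="a white cat", image=low_res_img, return_dict=False)
```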