Buckets:
| import{s as Wn,o as Ln,n as Bn}from"../chunks/scheduler.53228c21.js";import{S as Gn,i as Nn,e as r,s as a,c,h as An,a as l,d as t,b as i,f as T,g as m,j as d,k as S,l as s,m as o,n as g,t as u,o as f,p as h}from"../chunks/index.100fac89.js";import{D as J}from"../chunks/Docstring.8eea0d47.js";import{C as Pt}from"../chunks/CodeBlock.d30a6509.js";import{E as En}from"../chunks/ExampleCodeBlock.5e9b5749.js";import{H as Je,E as Vn}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.92f39b94.js";function qn(Ue){let _,U="Examples:",$,w,M;return w=new Pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU2FuYVNwcmludFBpcGVsaW5lJTBBJTBBcGlwZSUyMCUzRCUyMFNhbmFTcHJpbnRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRWZmaWNpZW50LUxhcmdlLU1vZGVsJTJGU2FuYV9TcHJpbnRfMS42Ql8xMDI0cHhfZGlmZnVzZXJzJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUzRCUyMmElMjB0aW55JTIwYXN0cm9uYXV0JTIwaGF0Y2hpbmclMjBmcm9tJTIwYW4lMjBlZ2clMjBvbiUyMHRoZSUyMG1vb24lMjIpJTVCMCU1RCUwQWltYWdlJTVCMCU1RC5zYXZlKCUyMm91dHB1dC5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> SanaSprintPipeline | |
| <span class="hljs-meta">>>> </span>pipe = SanaSprintPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt=<span class="hljs-string">"a tiny astronaut hatching from an egg on the moon"</span>)[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),{c(){_=r("p"),_.textContent=U,$=a(),c(w.$$.fragment)},l(p){_=l(p,"P",{"data-svelte-h":!0}),d(_)!=="svelte-kvfsh7"&&(_.textContent=U),$=i(p),m(w.$$.fragment,p)},m(p,x){o(p,_,x),o(p,$,x),g(w,p,x),M=!0},p:Bn,i(p){M||(u(w.$$.fragment,p),M=!0)},o(p){f(w.$$.fragment,p),M=!1},d(p){p&&(t(_),t($)),h(w,p)}}}function Rn(Ue){let _,U="Examples:",$,w,M;return w=new Pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU2FuYVNwcmludEltZzJJbWdQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMubG9hZGluZ191dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwU2FuYVNwcmludEltZzJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyRWZmaWNpZW50LUxhcmdlLU1vZGVsJTJGU2FuYV9TcHJpbnRfMS42Ql8xMDI0cHhfZGlmZnVzZXJzJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRnBlbmd1aW4ucG5nJTIyJTBBKSUwQSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlM0QlMjJhJTIwY3V0ZSUyMHBpbmslMjBiZWFyJTIyJTJDJTIwaW1hZ2UlM0RpbWFnZSUyQyUyMHN0cmVuZ3RoJTNEMC41JTJDJTIwaGVpZ2h0JTNEODMyJTJDJTIwd2lkdGglM0Q0ODApLmltYWdlcyU1QjAlNUQlMEFpbWFnZSU1QjAlNUQuc2F2ZSglMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> SanaSprintImg2ImgPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils.loading_utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = SanaSprintImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt=<span class="hljs-string">"a cute pink bear"</span>, image=image, strength=<span class="hljs-number">0.5</span>, height=<span class="hljs-number">832</span>, width=<span class="hljs-number">480</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image[<span class="hljs-number">0</span>].save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),{c(){_=r("p"),_.textContent=U,$=a(),c(w.$$.fragment)},l(p){_=l(p,"P",{"data-svelte-h":!0}),d(_)!=="svelte-kvfsh7"&&(_.textContent=U),$=i(p),m(w.$$.fragment,p)},m(p,x){o(p,_,x),o(p,$,x),g(w,p,x),M=!0},p:Bn,i(p){M||(u(w.$$.fragment,p),M=!0)},o(p){f(w.$$.fragment,p),M=!1},d(p){p&&(t(_),t($)),h(w,p)}}}function Hn(Ue){let _,U,$,w,M,p,x,rn='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>',Fe,X,ln='<a href="https://huggingface.co/papers/2503.09641" rel="nofollow">SANA-Sprint: One-Step Diffusion with Continuous-Time Consistency Distillation</a> from NVIDIA, MIT HAN Lab, and Hugging Face by Junsong Chen, Shuchen Xue, Yuyang Zhao, Jincheng Yu, Sayak Paul, Junyu Chen, Han Cai, Enze Xie, Song Han',De,F,pn="The abstract from the paper is:",ze,D,dn="<em>This paper presents SANA-Sprint, an efficient diffusion model for ultra-fast text-to-image (T2I) generation. SANA-Sprint is built on a pre-trained foundation model and augmented with hybrid distillation, dramatically reducing inference steps from 20 to 1-4. We introduce three key innovations: (1) We propose a training-free approach that transforms a pre-trained flow-matching model for continuous-time consistency distillation (sCM), eliminating costly training from scratch and achieving high training efficiency. Our hybrid distillation strategy combines sCM with latent adversarial distillation (LADD): sCM ensures alignment with the teacher model, while LADD enhances single-step generation fidelity. (2) SANA-Sprint is a unified step-adaptive model that achieves high-quality generation in 1-4 steps, eliminating step-specific training and improving efficiency. (3) We integrate ControlNet with SANA-Sprint for real-time interactive image generation, enabling instant visual feedback for user interaction. SANA-Sprint establishes a new Pareto frontier in speed-quality tradeoffs, achieving state-of-the-art performance with 7.59 FID and 0.74 GenEval in only 1 step — outperforming FLUX-schnell (7.94 FID / 0.71 GenEval) while being 10× faster (0.1s vs 1.1s on H100). It also achieves 0.1s (T2I) and 0.25s (ControlNet) latency for 1024×1024 images on H100, and 0.31s (T2I) on an RTX 4090, showcasing its exceptional efficiency and potential for AI-powered consumer applications (AIPC). Code and pre-trained models will be open-sourced.</em>",Qe,z,cn='This pipeline was contributed by <a href="https://github.com/lawrence-cj" rel="nofollow">lawrence-cj</a>, <a href="https://github.com/scxue" rel="nofollow">shuchen Xue</a> and <a href="https://github.com/xieenze" rel="nofollow">Enze Xie</a>. The original codebase can be found <a href="https://github.com/NVlabs/Sana" rel="nofollow">here</a>. The original weights can be found under <a href="https://huggingface.co/Efficient-Large-Model/" rel="nofollow">hf.co/Efficient-Large-Model</a>.',Ye,Q,mn="Available models:",Oe,Y,gn='<thead><tr><th align="center">Model</th> <th align="center">Recommended dtype</th></tr></thead> <tbody><tr><td align="center"><a href="https://huggingface.co/Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers" rel="nofollow"><code>Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers</code></a></td> <td align="center"><code>torch.bfloat16</code></td></tr> <tr><td align="center"><a href="https://huggingface.co/Efficient-Large-Model/Sana_Sprint_0.6B_1024px_diffusers" rel="nofollow"><code>Efficient-Large-Model/Sana_Sprint_0.6B_1024px_diffusers</code></a></td> <td align="center"><code>torch.bfloat16</code></td></tr></tbody>',Ke,O,un='Refer to <a href="https://huggingface.co/collections/Efficient-Large-Model/sana-sprint-67d6810d65235085b3b17c76" rel="nofollow">this</a> collection for more information.',et,K,fn="Note: The recommended dtype mentioned is for the transformer weights. The text encoder must stay in <code>torch.bfloat16</code> and VAE weights must stay in <code>torch.bfloat16</code> or <code>torch.float32</code> for the model to work correctly. Please refer to the inference example below to see how to load the model with the recommended dtype.",tt,ee,nt,te,hn="Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.",at,ne,_n='Refer to the <a href="../../quantization/overview">Quantization</a> overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized <a href="/docs/diffusers/pr_12762/en/api/pipelines/sana_sprint#diffusers.SanaSprintPipeline">SanaSprintPipeline</a> for inference with bitsandbytes.',it,ae,st,ie,ot,se,bn="Users can tweak the <code>max_timesteps</code> value for experimenting with the visual quality of the generated outputs. The default <code>max_timesteps</code> value was obtained with an inference-time search process. For more details about it, check out the paper.",rt,oe,lt,re,yn='The <a href="/docs/diffusers/pr_12762/en/api/pipelines/sana_sprint#diffusers.SanaSprintImg2ImgPipeline">SanaSprintImg2ImgPipeline</a> is a pipeline for image-to-image generation. It takes an input image and a prompt, and generates a new image based on the input image and the prompt.',pt,le,dt,pe,ct,b,de,Ct,Pe,wn='Pipeline for text-to-image generation using <a href="https://huggingface.co/papers/2503.09641" rel="nofollow">SANA-Sprint</a>.',Zt,P,ce,kt,Ce,vn="Function invoked when calling the pipeline for generation.",jt,j,Et,E,me,Bt,Ze,Sn=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Wt,B,ge,Lt,ke,Mn=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Gt,W,ue,Nt,je,Tn=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,At,L,fe,Vt,Ee,xn=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,qt,G,he,Rt,Be,In="Encodes the prompt into text encoder hidden states.",mt,_e,gt,y,be,Ht,We,$n='Pipeline for text-to-image generation using <a href="https://huggingface.co/papers/2503.09641" rel="nofollow">SANA-Sprint</a>.',Xt,C,ye,Ft,Le,Jn="Function invoked when calling the pipeline for generation.",Dt,N,zt,A,we,Qt,Ge,Un=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Yt,V,ve,Ot,Ne,Pn=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Kt,q,Se,en,Ae,Cn=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,tn,R,Me,nn,Ve,Zn=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,an,H,Te,sn,qe,kn="Encodes the prompt into text encoder hidden states.",ut,xe,ft,Z,Ie,on,Re,jn="Output class for Sana pipelines.",ht,$e,_t,Xe,bt;return M=new Je({props:{title:"SANA-Sprint",local:"sana-sprint",headingTag:"h1"}}),ee=new Je({props:{title:"Quantization",local:"quantization",headingTag:"h2"}}),ae=new Pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQml0c0FuZEJ5dGVzQ29uZmlnJTIwYXMlMjBEaWZmdXNlcnNCaXRzQW5kQnl0ZXNDb25maWclMkMlMjBTYW5hVHJhbnNmb3JtZXIyRE1vZGVsJTJDJTIwU2FuYVNwcmludFBpcGVsaW5lJTBBZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMEJpdHNBbmRCeXRlc0NvbmZpZyUyMGFzJTIwQml0c0FuZEJ5dGVzQ29uZmlnJTJDJTIwQXV0b01vZGVsJTBBJTBBcXVhbnRfY29uZmlnJTIwJTNEJTIwQml0c0FuZEJ5dGVzQ29uZmlnKGxvYWRfaW5fOGJpdCUzRFRydWUpJTBBdGV4dF9lbmNvZGVyXzhiaXQlMjAlM0QlMjBBdXRvTW9kZWwuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkVmZmljaWVudC1MYXJnZS1Nb2RlbCUyRlNhbmFfU3ByaW50XzEuNkJfMTAyNHB4X2RpZmZ1c2VycyUyMiUyQyUwQSUyMCUyMCUyMCUyMHN1YmZvbGRlciUzRCUyMnRleHRfZW5jb2RlciUyMiUyQyUwQSUyMCUyMCUyMCUyMHF1YW50aXphdGlvbl9jb25maWclM0RxdWFudF9jb25maWclMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTJDJTBBKSUwQSUwQXF1YW50X2NvbmZpZyUyMCUzRCUyMERpZmZ1c2Vyc0JpdHNBbmRCeXRlc0NvbmZpZyhsb2FkX2luXzhiaXQlM0RUcnVlKSUwQXRyYW5zZm9ybWVyXzhiaXQlMjAlM0QlMjBTYW5hVHJhbnNmb3JtZXIyRE1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJFZmZpY2llbnQtTGFyZ2UtTW9kZWwlMkZTYW5hX1NwcmludF8xLjZCXzEwMjRweF9kaWZmdXNlcnMlMjIlMkMlMEElMjAlMjAlMjAlMjBzdWJmb2xkZXIlM0QlMjJ0cmFuc2Zvcm1lciUyMiUyQyUwQSUyMCUyMCUyMCUyMHF1YW50aXphdGlvbl9jb25maWclM0RxdWFudF9jb25maWclMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTJDJTBBKSUwQSUwQXBpcGVsaW5lJTIwJTNEJTIwU2FuYVNwcmludFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJFZmZpY2llbnQtTGFyZ2UtTW9kZWwlMkZTYW5hX1NwcmludF8xLjZCXzEwMjRweF9kaWZmdXNlcnMlMjIlMkMlMEElMjAlMjAlMjAlMjB0ZXh0X2VuY29kZXIlM0R0ZXh0X2VuY29kZXJfOGJpdCUyQyUwQSUyMCUyMCUyMCUyMHRyYW5zZm9ybWVyJTNEdHJhbnNmb3JtZXJfOGJpdCUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMkMlMEElMjAlMjAlMjAlMjBkZXZpY2VfbWFwJTNEJTIyYmFsYW5jZWQlMjIlMkMlMEEpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYSUyMHRpbnklMjBhc3Ryb25hdXQlMjBoYXRjaGluZyUyMGZyb20lMjBhbiUyMGVnZyUyMG9uJTIwdGhlJTIwbW9vbiUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZWxpbmUocHJvbXB0KS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJzYW5hLnBuZyUyMik=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> BitsAndBytesConfig <span class="hljs-keyword">as</span> DiffusersBitsAndBytesConfig, SanaTransformer2DModel, SanaSprintPipeline | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BitsAndBytesConfig <span class="hljs-keyword">as</span> BitsAndBytesConfig, AutoModel | |
| quant_config = BitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>) | |
| text_encoder_8bit = AutoModel.from_pretrained( | |
| <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, | |
| subfolder=<span class="hljs-string">"text_encoder"</span>, | |
| quantization_config=quant_config, | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>) | |
| transformer_8bit = SanaTransformer2DModel.from_pretrained( | |
| <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| quantization_config=quant_config, | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| pipeline = SanaSprintPipeline.from_pretrained( | |
| <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, | |
| text_encoder=text_encoder_8bit, | |
| transformer=transformer_8bit, | |
| torch_dtype=torch.bfloat16, | |
| device_map=<span class="hljs-string">"balanced"</span>, | |
| ) | |
| prompt = <span class="hljs-string">"a tiny astronaut hatching from an egg on the moon"</span> | |
| image = pipeline(prompt).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"sana.png"</span>)`,wrap:!1}}),ie=new Je({props:{title:"Setting max_timesteps",local:"setting-maxtimesteps",headingTag:"h2"}}),oe=new Je({props:{title:"Image to Image",local:"image-to-image",headingTag:"h2"}}),le=new Pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU2FuYVNwcmludEltZzJJbWdQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMubG9hZGluZ191dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFpbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGcGVuZ3Vpbi5wbmclMjIlMEEpJTBBJTBBcGlwZSUyMCUzRCUyMFNhbmFTcHJpbnRJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkVmZmljaWVudC1MYXJnZS1Nb2RlbCUyRlNhbmFfU3ByaW50XzEuNkJfMTAyNHB4X2RpZmZ1c2VycyUyMiUyQyUyMCUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJhJTIwY3V0ZSUyMHBpbmslMjBiZWFyJTIyJTJDJTIwJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUyMCUwQSUyMCUyMCUyMCUyMHN0cmVuZ3RoJTNEMC41JTJDJTIwJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNEODMyJTJDJTIwJTBBJTIwJTIwJTIwJTIwd2lkdGglM0Q0ODAlMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMm91dHB1dC5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> SanaSprintImg2ImgPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils.loading_utils <span class="hljs-keyword">import</span> load_image | |
| image = load_image( | |
| <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png"</span> | |
| ) | |
| pipe = SanaSprintImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-string">"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers"</span>, | |
| torch_dtype=torch.bfloat16) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| image = pipe( | |
| prompt=<span class="hljs-string">"a cute pink bear"</span>, | |
| image=image, | |
| strength=<span class="hljs-number">0.5</span>, | |
| height=<span class="hljs-number">832</span>, | |
| width=<span class="hljs-number">480</span> | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),pe=new Je({props:{title:"SanaSprintPipeline",local:"diffusers.SanaSprintPipeline",headingTag:"h2"}}),de=new J({props:{name:"class diffusers.SanaSprintPipeline",anchor:"diffusers.SanaSprintPipeline",parameters:[{name:"tokenizer",val:": typing.Union[transformers.models.gemma.tokenization_gemma.GemmaTokenizer, transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast]"},{name:"text_encoder",val:": Gemma2PreTrainedModel"},{name:"vae",val:": AutoencoderDC"},{name:"transformer",val:": SanaTransformer2DModel"},{name:"scheduler",val:": DPMSolverMultistepScheduler"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L141"}}),ce=new J({props:{name:"__call__",anchor:"diffusers.SanaSprintPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 2"},{name:"timesteps",val:": typing.List[int] = None"},{name:"max_timesteps",val:": float = 1.5708"},{name:"intermediate_timesteps",val:": float = 1.3"},{name:"guidance_scale",val:": float = 4.5"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": int = 1024"},{name:"width",val:": int = 1024"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"clean_caption",val:": bool = False"},{name:"use_resolution_binning",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:`: typing.List[str] = ["Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. Evaluate the level of detail in the user prompt:", '- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.', '- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.', 'Here are examples of how to transform or refine prompts:', '- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.', '- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.', 'Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:', 'User Prompt: ']`}],parametersDescription:[{anchor:"diffusers.SanaSprintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.SanaSprintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 20) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.SanaSprintPipeline.__call__.max_timesteps",description:`<strong>max_timesteps</strong> (<code>float</code>, <em>optional</em>, defaults to 1.57080) — | |
| The maximum timestep value used in the SCM scheduler.`,name:"max_timesteps"},{anchor:"diffusers.SanaSprintPipeline.__call__.intermediate_timesteps",description:`<strong>intermediate_timesteps</strong> (<code>float</code>, <em>optional</em>, defaults to 1.3) — | |
| The intermediate timestep value used in SCM scheduler (only used when num_inference_steps=2).`,name:"intermediate_timesteps"},{anchor:"diffusers.SanaSprintPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.SanaSprintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.5) — | |
| Embedded guiddance scale is enabled by setting <code>guidance_scale</code> > 1. Higher <code>guidance_scale</code> encourages | |
| a model to generate images more aligned with <code>prompt</code> at the expense of lower image quality.</p> | |
| <p>Guidance-distilled models approximates true classifer-free guidance for <code>guidance_scale</code> > 1. Refer to | |
| the <a href="https://huggingface.co/papers/2210.03142" rel="nofollow">paper</a> to learn more.`,name:"guidance_scale"},{anchor:"diffusers.SanaSprintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaSprintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.SanaSprintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.SanaSprintPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.SanaSprintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.SanaSprintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.SanaSprintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaSprintPipeline.__call__.prompt_attention_mask",description:"<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — Pre-generated attention mask for text embeddings.",name:"prompt_attention_mask"},{anchor:"diffusers.SanaSprintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.SanaSprintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.SanaSprintPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.SanaSprintPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.SanaSprintPipeline.__call__.use_resolution_binning",description:`<strong>use_resolution_binning</strong> (<code>bool</code> defaults to <code>True</code>) — | |
| If set to <code>True</code>, the requested height and width are first mapped to the closest resolutions using | |
| <code>ASPECT_RATIO_1024_BIN</code>. After the produced latents are decoded into images, they are resized back to | |
| the requested resolution. Useful for generating non-square images.`,name:"use_resolution_binning"},{anchor:"diffusers.SanaSprintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.SanaSprintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.SanaSprintPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>300</code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"},{anchor:"diffusers.SanaSprintPipeline.__call__.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| Instructions for complex human attention: | |
| <a href="https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55" rel="nofollow">https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55</a>.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L615",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/pr_12762/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> is returned, | |
| otherwise a <code>tuple</code> is returned where the first element is a list with the generated images</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_12762/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),j=new En({props:{anchor:"diffusers.SanaSprintPipeline.__call__.example",$$slots:{default:[qn]},$$scope:{ctx:Ue}}}),me=new J({props:{name:"disable_vae_slicing",anchor:"diffusers.SanaSprintPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L187"}}),ge=new J({props:{name:"disable_vae_tiling",anchor:"diffusers.SanaSprintPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L214"}}),ue=new J({props:{name:"enable_vae_slicing",anchor:"diffusers.SanaSprintPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L174"}}),fe=new J({props:{name:"enable_vae_tiling",anchor:"diffusers.SanaSprintPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L200"}}),he=new J({props:{name:"encode_prompt",anchor:"diffusers.SanaSprintPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:": typing.Optional[typing.List[str]] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.SanaSprintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code>, defaults to 300) — Maximum sequence length to use for the prompt.",name:"max_sequence_length"},{anchor:"diffusers.SanaSprintPipeline.encode_prompt.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>list[str]</code>, defaults to <code>complex_human_instruction</code>) — | |
| If <code>complex_human_instruction</code> is not empty, the function will use the complex Human instruction for | |
| the prompt.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint.py#L286"}}),_e=new Je({props:{title:"SanaSprintImg2ImgPipeline",local:"diffusers.SanaSprintImg2ImgPipeline",headingTag:"h2"}}),be=new J({props:{name:"class diffusers.SanaSprintImg2ImgPipeline",anchor:"diffusers.SanaSprintImg2ImgPipeline",parameters:[{name:"tokenizer",val:": typing.Union[transformers.models.gemma.tokenization_gemma.GemmaTokenizer, transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast]"},{name:"text_encoder",val:": Gemma2PreTrainedModel"},{name:"vae",val:": AutoencoderDC"},{name:"transformer",val:": SanaTransformer2DModel"},{name:"scheduler",val:": DPMSolverMultistepScheduler"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L147"}}),ye=new J({props:{name:"__call__",anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"num_inference_steps",val:": int = 2"},{name:"timesteps",val:": typing.List[int] = None"},{name:"max_timesteps",val:": float = 1.5708"},{name:"intermediate_timesteps",val:": float = 1.3"},{name:"guidance_scale",val:": float = 4.5"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"strength",val:": float = 0.6"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": int = 1024"},{name:"width",val:": int = 1024"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"clean_caption",val:": bool = False"},{name:"use_resolution_binning",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:`: typing.List[str] = ["Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. Evaluate the level of detail in the user prompt:", '- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.', '- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.', 'Here are examples of how to transform or refine prompts:', '- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.', '- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.', 'Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:', 'User Prompt: ']`}],parametersDescription:[{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 20) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.max_timesteps",description:`<strong>max_timesteps</strong> (<code>float</code>, <em>optional</em>, defaults to 1.57080) — | |
| The maximum timestep value used in the SCM scheduler.`,name:"max_timesteps"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.intermediate_timesteps",description:`<strong>intermediate_timesteps</strong> (<code>float</code>, <em>optional</em>, defaults to 1.3) — | |
| The intermediate timestep value used in SCM scheduler (only used when num_inference_steps=2).`,name:"intermediate_timesteps"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.5) — | |
| Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>. | |
| <code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen | |
| Paper</a>. Guidance scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to the text <code>prompt</code>, | |
| usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">https://arxiv.org/abs/2010.02502</a>. Only applies to | |
| <a href="/docs/diffusers/pr_12762/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.prompt_attention_mask",description:"<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — Pre-generated attention mask for text embeddings.",name:"prompt_attention_mask"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.use_resolution_binning",description:`<strong>use_resolution_binning</strong> (<code>bool</code> defaults to <code>True</code>) — | |
| If set to <code>True</code>, the requested height and width are first mapped to the closest resolutions using | |
| <code>ASPECT_RATIO_1024_BIN</code>. After the produced latents are decoded into images, they are resized back to | |
| the requested resolution. Useful for generating non-square images.`,name:"use_resolution_binning"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>300</code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| Instructions for complex human attention: | |
| <a href="https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55" rel="nofollow">https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55</a>.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L686",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/pr_12762/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> is returned, | |
| otherwise a <code>tuple</code> is returned where the first element is a list with the generated images</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_12762/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),N=new En({props:{anchor:"diffusers.SanaSprintImg2ImgPipeline.__call__.example",$$slots:{default:[Rn]},$$scope:{ctx:Ue}}}),we=new J({props:{name:"disable_vae_slicing",anchor:"diffusers.SanaSprintImg2ImgPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L196"}}),ve=new J({props:{name:"disable_vae_tiling",anchor:"diffusers.SanaSprintImg2ImgPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L224"}}),Se=new J({props:{name:"enable_vae_slicing",anchor:"diffusers.SanaSprintImg2ImgPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L182"}}),Me=new J({props:{name:"enable_vae_tiling",anchor:"diffusers.SanaSprintImg2ImgPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L210"}}),Te=new J({props:{name:"encode_prompt",anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:": typing.Optional[typing.List[str]] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code>, defaults to 300) — Maximum sequence length to use for the prompt.",name:"max_sequence_length"},{anchor:"diffusers.SanaSprintImg2ImgPipeline.encode_prompt.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>list[str]</code>, defaults to <code>complex_human_instruction</code>) — | |
| If <code>complex_human_instruction</code> is not empty, the function will use the complex Human instruction for | |
| the prompt.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_sana_sprint_img2img.py#L297"}}),xe=new Je({props:{title:"SanaPipelineOutput",local:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",headingTag:"h2"}}),Ie=new J({props:{name:"class diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",anchor:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"}],parametersDescription:[{anchor:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or numpy array of shape <code>(batch_size, height, width, num_channels)</code>. PIL images or numpy array present the denoised images of the diffusion pipeline.`,name:"images"}],source:"https://github.com/huggingface/diffusers/blob/vr_12762/src/diffusers/pipelines/sana/pipeline_output.py#L11"}}),$e=new Vn({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/sana_sprint.md"}}),{c(){_=r("meta"),U=a(),$=r("p"),w=a(),c(M.$$.fragment),p=a(),x=r("div"),x.innerHTML=rn,Fe=a(),X=r("p"),X.innerHTML=ln,De=a(),F=r("p"),F.textContent=pn,ze=a(),D=r("p"),D.innerHTML=dn,Qe=a(),z=r("p"),z.innerHTML=cn,Ye=a(),Q=r("p"),Q.textContent=mn,Oe=a(),Y=r("table"),Y.innerHTML=gn,Ke=a(),O=r("p"),O.innerHTML=un,et=a(),K=r("p"),K.innerHTML=fn,tt=a(),c(ee.$$.fragment),nt=a(),te=r("p"),te.textContent=hn,at=a(),ne=r("p"),ne.innerHTML=_n,it=a(),c(ae.$$.fragment),st=a(),c(ie.$$.fragment),ot=a(),se=r("p"),se.innerHTML=bn,rt=a(),c(oe.$$.fragment),lt=a(),re=r("p"),re.innerHTML=yn,pt=a(),c(le.$$.fragment),dt=a(),c(pe.$$.fragment),ct=a(),b=r("div"),c(de.$$.fragment),Ct=a(),Pe=r("p"),Pe.innerHTML=wn,Zt=a(),P=r("div"),c(ce.$$.fragment),kt=a(),Ce=r("p"),Ce.textContent=vn,jt=a(),c(j.$$.fragment),Et=a(),E=r("div"),c(me.$$.fragment),Bt=a(),Ze=r("p"),Ze.innerHTML=Sn,Wt=a(),B=r("div"),c(ge.$$.fragment),Lt=a(),ke=r("p"),ke.innerHTML=Mn,Gt=a(),W=r("div"),c(ue.$$.fragment),Nt=a(),je=r("p"),je.textContent=Tn,At=a(),L=r("div"),c(fe.$$.fragment),Vt=a(),Ee=r("p"),Ee.textContent=xn,qt=a(),G=r("div"),c(he.$$.fragment),Rt=a(),Be=r("p"),Be.textContent=In,mt=a(),c(_e.$$.fragment),gt=a(),y=r("div"),c(be.$$.fragment),Ht=a(),We=r("p"),We.innerHTML=$n,Xt=a(),C=r("div"),c(ye.$$.fragment),Ft=a(),Le=r("p"),Le.textContent=Jn,Dt=a(),c(N.$$.fragment),zt=a(),A=r("div"),c(we.$$.fragment),Qt=a(),Ge=r("p"),Ge.innerHTML=Un,Yt=a(),V=r("div"),c(ve.$$.fragment),Ot=a(),Ne=r("p"),Ne.innerHTML=Pn,Kt=a(),q=r("div"),c(Se.$$.fragment),en=a(),Ae=r("p"),Ae.textContent=Cn,tn=a(),R=r("div"),c(Me.$$.fragment),nn=a(),Ve=r("p"),Ve.textContent=Zn,an=a(),H=r("div"),c(Te.$$.fragment),sn=a(),qe=r("p"),qe.textContent=kn,ut=a(),c(xe.$$.fragment),ft=a(),Z=r("div"),c(Ie.$$.fragment),on=a(),Re=r("p"),Re.textContent=jn,ht=a(),c($e.$$.fragment),_t=a(),Xe=r("p"),this.h()},l(e){const n=An("svelte-u9bgzb",document.head);_=l(n,"META",{name:!0,content:!0}),n.forEach(t),U=i(e),$=l(e,"P",{}),T($).forEach(t),w=i(e),m(M.$$.fragment,e),p=i(e),x=l(e,"DIV",{class:!0,"data-svelte-h":!0}),d(x)!=="svelte-si9ct8"&&(x.innerHTML=rn),Fe=i(e),X=l(e,"P",{"data-svelte-h":!0}),d(X)!=="svelte-2zyjd8"&&(X.innerHTML=ln),De=i(e),F=l(e,"P",{"data-svelte-h":!0}),d(F)!=="svelte-1cwsb16"&&(F.textContent=pn),ze=i(e),D=l(e,"P",{"data-svelte-h":!0}),d(D)!=="svelte-rsnwrp"&&(D.innerHTML=dn),Qe=i(e),z=l(e,"P",{"data-svelte-h":!0}),d(z)!=="svelte-1rwl4x2"&&(z.innerHTML=cn),Ye=i(e),Q=l(e,"P",{"data-svelte-h":!0}),d(Q)!=="svelte-1bob28v"&&(Q.textContent=mn),Oe=i(e),Y=l(e,"TABLE",{"data-svelte-h":!0}),d(Y)!=="svelte-1nolrdp"&&(Y.innerHTML=gn),Ke=i(e),O=l(e,"P",{"data-svelte-h":!0}),d(O)!=="svelte-27y0yu"&&(O.innerHTML=un),et=i(e),K=l(e,"P",{"data-svelte-h":!0}),d(K)!=="svelte-5fyxa5"&&(K.innerHTML=fn),tt=i(e),m(ee.$$.fragment,e),nt=i(e),te=l(e,"P",{"data-svelte-h":!0}),d(te)!=="svelte-1ou2pxc"&&(te.textContent=hn),at=i(e),ne=l(e,"P",{"data-svelte-h":!0}),d(ne)!=="svelte-1s0hweg"&&(ne.innerHTML=_n),it=i(e),m(ae.$$.fragment,e),st=i(e),m(ie.$$.fragment,e),ot=i(e),se=l(e,"P",{"data-svelte-h":!0}),d(se)!=="svelte-1nuwixv"&&(se.innerHTML=bn),rt=i(e),m(oe.$$.fragment,e),lt=i(e),re=l(e,"P",{"data-svelte-h":!0}),d(re)!=="svelte-pkeg8n"&&(re.innerHTML=yn),pt=i(e),m(le.$$.fragment,e),dt=i(e),m(pe.$$.fragment,e),ct=i(e),b=l(e,"DIV",{class:!0});var v=T(b);m(de.$$.fragment,v),Ct=i(v),Pe=l(v,"P",{"data-svelte-h":!0}),d(Pe)!=="svelte-19l2afh"&&(Pe.innerHTML=wn),Zt=i(v),P=l(v,"DIV",{class:!0});var k=T(P);m(ce.$$.fragment,k),kt=i(k),Ce=l(k,"P",{"data-svelte-h":!0}),d(Ce)!=="svelte-v78lg8"&&(Ce.textContent=vn),jt=i(k),m(j.$$.fragment,k),k.forEach(t),Et=i(v),E=l(v,"DIV",{class:!0});var yt=T(E);m(me.$$.fragment,yt),Bt=i(yt),Ze=l(yt,"P",{"data-svelte-h":!0}),d(Ze)!=="svelte-1s3c06i"&&(Ze.innerHTML=Sn),yt.forEach(t),Wt=i(v),B=l(v,"DIV",{class:!0});var wt=T(B);m(ge.$$.fragment,wt),Lt=i(wt),ke=l(wt,"P",{"data-svelte-h":!0}),d(ke)!=="svelte-pkn4ui"&&(ke.innerHTML=Mn),wt.forEach(t),Gt=i(v),W=l(v,"DIV",{class:!0});var vt=T(W);m(ue.$$.fragment,vt),Nt=i(vt),je=l(vt,"P",{"data-svelte-h":!0}),d(je)!=="svelte-14bnrb6"&&(je.textContent=Tn),vt.forEach(t),At=i(v),L=l(v,"DIV",{class:!0});var St=T(L);m(fe.$$.fragment,St),Vt=i(St),Ee=l(St,"P",{"data-svelte-h":!0}),d(Ee)!=="svelte-1xwrf7t"&&(Ee.textContent=xn),St.forEach(t),qt=i(v),G=l(v,"DIV",{class:!0});var Mt=T(G);m(he.$$.fragment,Mt),Rt=i(Mt),Be=l(Mt,"P",{"data-svelte-h":!0}),d(Be)!=="svelte-16q0ax1"&&(Be.textContent=In),Mt.forEach(t),v.forEach(t),mt=i(e),m(_e.$$.fragment,e),gt=i(e),y=l(e,"DIV",{class:!0});var I=T(y);m(be.$$.fragment,I),Ht=i(I),We=l(I,"P",{"data-svelte-h":!0}),d(We)!=="svelte-19l2afh"&&(We.innerHTML=$n),Xt=i(I),C=l(I,"DIV",{class:!0});var He=T(C);m(ye.$$.fragment,He),Ft=i(He),Le=l(He,"P",{"data-svelte-h":!0}),d(Le)!=="svelte-v78lg8"&&(Le.textContent=Jn),Dt=i(He),m(N.$$.fragment,He),He.forEach(t),zt=i(I),A=l(I,"DIV",{class:!0});var Tt=T(A);m(we.$$.fragment,Tt),Qt=i(Tt),Ge=l(Tt,"P",{"data-svelte-h":!0}),d(Ge)!=="svelte-1s3c06i"&&(Ge.innerHTML=Un),Tt.forEach(t),Yt=i(I),V=l(I,"DIV",{class:!0});var xt=T(V);m(ve.$$.fragment,xt),Ot=i(xt),Ne=l(xt,"P",{"data-svelte-h":!0}),d(Ne)!=="svelte-pkn4ui"&&(Ne.innerHTML=Pn),xt.forEach(t),Kt=i(I),q=l(I,"DIV",{class:!0});var It=T(q);m(Se.$$.fragment,It),en=i(It),Ae=l(It,"P",{"data-svelte-h":!0}),d(Ae)!=="svelte-14bnrb6"&&(Ae.textContent=Cn),It.forEach(t),tn=i(I),R=l(I,"DIV",{class:!0});var $t=T(R);m(Me.$$.fragment,$t),nn=i($t),Ve=l($t,"P",{"data-svelte-h":!0}),d(Ve)!=="svelte-1xwrf7t"&&(Ve.textContent=Zn),$t.forEach(t),an=i(I),H=l(I,"DIV",{class:!0});var Jt=T(H);m(Te.$$.fragment,Jt),sn=i(Jt),qe=l(Jt,"P",{"data-svelte-h":!0}),d(qe)!=="svelte-16q0ax1"&&(qe.textContent=kn),Jt.forEach(t),I.forEach(t),ut=i(e),m(xe.$$.fragment,e),ft=i(e),Z=l(e,"DIV",{class:!0});var Ut=T(Z);m(Ie.$$.fragment,Ut),on=i(Ut),Re=l(Ut,"P",{"data-svelte-h":!0}),d(Re)!=="svelte-1h3n85u"&&(Re.textContent=jn),Ut.forEach(t),ht=i(e),m($e.$$.fragment,e),_t=i(e),Xe=l(e,"P",{}),T(Xe).forEach(t),this.h()},h(){S(_,"name","hf:doc:metadata"),S(_,"content",Xn),S(x,"class","flex flex-wrap space-x-1"),S(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),S(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,n){s(document.head,_),o(e,U,n),o(e,$,n),o(e,w,n),g(M,e,n),o(e,p,n),o(e,x,n),o(e,Fe,n),o(e,X,n),o(e,De,n),o(e,F,n),o(e,ze,n),o(e,D,n),o(e,Qe,n),o(e,z,n),o(e,Ye,n),o(e,Q,n),o(e,Oe,n),o(e,Y,n),o(e,Ke,n),o(e,O,n),o(e,et,n),o(e,K,n),o(e,tt,n),g(ee,e,n),o(e,nt,n),o(e,te,n),o(e,at,n),o(e,ne,n),o(e,it,n),g(ae,e,n),o(e,st,n),g(ie,e,n),o(e,ot,n),o(e,se,n),o(e,rt,n),g(oe,e,n),o(e,lt,n),o(e,re,n),o(e,pt,n),g(le,e,n),o(e,dt,n),g(pe,e,n),o(e,ct,n),o(e,b,n),g(de,b,null),s(b,Ct),s(b,Pe),s(b,Zt),s(b,P),g(ce,P,null),s(P,kt),s(P,Ce),s(P,jt),g(j,P,null),s(b,Et),s(b,E),g(me,E,null),s(E,Bt),s(E,Ze),s(b,Wt),s(b,B),g(ge,B,null),s(B,Lt),s(B,ke),s(b,Gt),s(b,W),g(ue,W,null),s(W,Nt),s(W,je),s(b,At),s(b,L),g(fe,L,null),s(L,Vt),s(L,Ee),s(b,qt),s(b,G),g(he,G,null),s(G,Rt),s(G,Be),o(e,mt,n),g(_e,e,n),o(e,gt,n),o(e,y,n),g(be,y,null),s(y,Ht),s(y,We),s(y,Xt),s(y,C),g(ye,C,null),s(C,Ft),s(C,Le),s(C,Dt),g(N,C,null),s(y,zt),s(y,A),g(we,A,null),s(A,Qt),s(A,Ge),s(y,Yt),s(y,V),g(ve,V,null),s(V,Ot),s(V,Ne),s(y,Kt),s(y,q),g(Se,q,null),s(q,en),s(q,Ae),s(y,tn),s(y,R),g(Me,R,null),s(R,nn),s(R,Ve),s(y,an),s(y,H),g(Te,H,null),s(H,sn),s(H,qe),o(e,ut,n),g(xe,e,n),o(e,ft,n),o(e,Z,n),g(Ie,Z,null),s(Z,on),s(Z,Re),o(e,ht,n),g($e,e,n),o(e,_t,n),o(e,Xe,n),bt=!0},p(e,[n]){const v={};n&2&&(v.$$scope={dirty:n,ctx:e}),j.$set(v);const k={};n&2&&(k.$$scope={dirty:n,ctx:e}),N.$set(k)},i(e){bt||(u(M.$$.fragment,e),u(ee.$$.fragment,e),u(ae.$$.fragment,e),u(ie.$$.fragment,e),u(oe.$$.fragment,e),u(le.$$.fragment,e),u(pe.$$.fragment,e),u(de.$$.fragment,e),u(ce.$$.fragment,e),u(j.$$.fragment,e),u(me.$$.fragment,e),u(ge.$$.fragment,e),u(ue.$$.fragment,e),u(fe.$$.fragment,e),u(he.$$.fragment,e),u(_e.$$.fragment,e),u(be.$$.fragment,e),u(ye.$$.fragment,e),u(N.$$.fragment,e),u(we.$$.fragment,e),u(ve.$$.fragment,e),u(Se.$$.fragment,e),u(Me.$$.fragment,e),u(Te.$$.fragment,e),u(xe.$$.fragment,e),u(Ie.$$.fragment,e),u($e.$$.fragment,e),bt=!0)},o(e){f(M.$$.fragment,e),f(ee.$$.fragment,e),f(ae.$$.fragment,e),f(ie.$$.fragment,e),f(oe.$$.fragment,e),f(le.$$.fragment,e),f(pe.$$.fragment,e),f(de.$$.fragment,e),f(ce.$$.fragment,e),f(j.$$.fragment,e),f(me.$$.fragment,e),f(ge.$$.fragment,e),f(ue.$$.fragment,e),f(fe.$$.fragment,e),f(he.$$.fragment,e),f(_e.$$.fragment,e),f(be.$$.fragment,e),f(ye.$$.fragment,e),f(N.$$.fragment,e),f(we.$$.fragment,e),f(ve.$$.fragment,e),f(Se.$$.fragment,e),f(Me.$$.fragment,e),f(Te.$$.fragment,e),f(xe.$$.fragment,e),f(Ie.$$.fragment,e),f($e.$$.fragment,e),bt=!1},d(e){e&&(t(U),t($),t(w),t(p),t(x),t(Fe),t(X),t(De),t(F),t(ze),t(D),t(Qe),t(z),t(Ye),t(Q),t(Oe),t(Y),t(Ke),t(O),t(et),t(K),t(tt),t(nt),t(te),t(at),t(ne),t(it),t(st),t(ot),t(se),t(rt),t(lt),t(re),t(pt),t(dt),t(ct),t(b),t(mt),t(gt),t(y),t(ut),t(ft),t(Z),t(ht),t(_t),t(Xe)),t(_),h(M,e),h(ee,e),h(ae,e),h(ie,e),h(oe,e),h(le,e),h(pe,e),h(de),h(ce),h(j),h(me),h(ge),h(ue),h(fe),h(he),h(_e,e),h(be),h(ye),h(N),h(we),h(ve),h(Se),h(Me),h(Te),h(xe,e),h(Ie),h($e,e)}}}const Xn='{"title":"SANA-Sprint","local":"sana-sprint","sections":[{"title":"Quantization","local":"quantization","sections":[],"depth":2},{"title":"Setting max_timesteps","local":"setting-maxtimesteps","sections":[],"depth":2},{"title":"Image to Image","local":"image-to-image","sections":[],"depth":2},{"title":"SanaSprintPipeline","local":"diffusers.SanaSprintPipeline","sections":[],"depth":2},{"title":"SanaSprintImg2ImgPipeline","local":"diffusers.SanaSprintImg2ImgPipeline","sections":[],"depth":2},{"title":"SanaPipelineOutput","local":"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput","sections":[],"depth":2}],"depth":1}';function Fn(Ue){return Ln(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ea extends Gn{constructor(_){super(),Nn(this,_,Fn,Hn,Wn,{})}}export{ea as component}; | |
Xet Storage Details
- Size:
- 65.7 kB
- Xet hash:
- 3a13745d2a829ddf09666f432ba6b84d660d664f3d7f66aab45591a722f19470
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.