Buckets:

rtrm's picture
download
raw
23.2 kB
<meta charset="utf-8" /><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Speed up inference&quot;,&quot;local&quot;:&quot;speed-up-inference&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;TensorFloat-32&quot;,&quot;local&quot;:&quot;tensorfloat-32&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Half-precision weights&quot;,&quot;local&quot;:&quot;half-precision-weights&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Distilled model&quot;,&quot;local&quot;:&quot;distilled-model&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Tiny AutoEncoder&quot;,&quot;local&quot;:&quot;tiny-autoencoder&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2}],&quot;depth&quot;:1}">
<link href="/docs/diffusers/pr_10083/en/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/entry/start.3ed1a0f4.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/scheduler.8c3d61f6.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/singletons.da8f2a2c.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/index.0997d446.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/paths.e585b256.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/entry/app.f0e18a17.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/index.da70eac4.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/nodes/0.0db17d3a.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/each.e59479a4.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/nodes/177.db89c7b1.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/Tip.1d9b8c37.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/CodeBlock.00a903b3.js">
<link rel="modulepreload" href="/docs/diffusers/pr_10083/en/_app/immutable/chunks/EditOnGithub.1e64e623.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Speed up inference&quot;,&quot;local&quot;:&quot;speed-up-inference&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;TensorFloat-32&quot;,&quot;local&quot;:&quot;tensorfloat-32&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Half-precision weights&quot;,&quot;local&quot;:&quot;half-precision-weights&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Distilled model&quot;,&quot;local&quot;:&quot;distilled-model&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Tiny AutoEncoder&quot;,&quot;local&quot;:&quot;tiny-autoencoder&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2}],&quot;depth&quot;:1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <h1 class="relative group"><a id="speed-up-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#speed-up-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Speed up inference</span></h1> <p data-svelte-h="svelte-iqdw6c">There are several ways to optimize Diffusers for inference speed, such as reducing the computational burden by lowering the data precision or using a lightweight distilled model. There are also memory-efficient attention implementations, <a href="xformers">xFormers</a> and <a href="https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html" rel="nofollow">scaled dot product attention</a> in PyTorch 2.0, that reduce memory usage which also indirectly speeds up inference. Different speed optimizations can be stacked together to get the fastest inference times.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p data-svelte-h="svelte-1wnb177">Optimizing for inference speed or reduced memory usage can lead to improved performance in the other category, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about lowering memory usage in the <a href="memory">Reduce memory usage</a> guide.</p></div> <p data-svelte-h="svelte-1866xsh">The inference times below are obtained from generating a single 512x512 image from the prompt “a photo of an astronaut riding a horse on mars” with 50 DDIM steps on a NVIDIA A100.</p> <table data-svelte-h="svelte-pydj5"><thead><tr><th>setup</th> <th>latency</th> <th>speed-up</th></tr></thead> <tbody><tr><td>baseline</td> <td>5.27s</td> <td>x1</td></tr> <tr><td>tf32</td> <td>4.14s</td> <td>x1.27</td></tr> <tr><td>fp16</td> <td>3.51s</td> <td>x1.50</td></tr> <tr><td>combined</td> <td>3.41s</td> <td>x1.54</td></tr></tbody></table> <h2 class="relative group"><a id="tensorfloat-32" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tensorfloat-32"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFloat-32</span></h2> <p data-svelte-h="svelte-zhyvo6">On Ampere and later CUDA devices, matrix multiplications and convolutions can use the <a href="https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/" rel="nofollow">TensorFloat-32 (tf32)</a> mode for faster, but slightly less accurate computations. By default, PyTorch enables tf32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling tf32 for matrix multiplications. It can significantly speed up computations with typically negligible loss in numerical accuracy.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch
torch.backends.cuda.matmul.allow_tf32 = <span class="hljs-literal">True</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-cmdptw">Learn more about tf32 in the <a href="https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32" rel="nofollow">Mixed precision training</a> guide.</p> <h2 class="relative group"><a id="half-precision-weights" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#half-precision-weights"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Half-precision weights</span></h2> <p data-svelte-h="svelte-1qb96mj">To save GPU memory and get more speed, set <code>torch_dtype=torch.float16</code> to load and run the model weights directly with half-precision weights.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>,
torch_dtype=torch.float16,
use_safetensors=<span class="hljs-literal">True</span>,
)
pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p data-svelte-h="svelte-148gqwg">Don’t use <a href="https://pytorch.org/docs/stable/amp.html#torch.autocast" rel="nofollow">torch.autocast</a> in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.</p></div> <h2 class="relative group"><a id="distilled-model" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#distilled-model"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distilled model</span></h2> <p data-svelte-h="svelte-1b1l0wp">You could also use a distilled Stable Diffusion model and autoencoder to speed up inference. During distillation, many of the UNet’s residual and attention blocks are shed to reduce the model size by 51% and improve latency on CPU/GPU by 43%. The distilled model is faster and uses less memory while generating images of comparable quality to the full Stable Diffusion model.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p data-svelte-h="svelte-11ogmeq">Read the <a href="https://huggingface.co/blog/sd_distillation" rel="nofollow">Open-sourcing Knowledge Distillation Code and Weights of SD-Small and SD-Tiny</a> blog post to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.</p></div> <p data-svelte-h="svelte-1h5auas">The inference times below are obtained from generating 4 images from the prompt “a photo of an astronaut riding a horse on mars” with 25 PNDM steps on a NVIDIA A100. Each generation is repeated 3 times with the distilled Stable Diffusion v1.4 model by <a href="https://hf.co/nota-ai" rel="nofollow">Nota AI</a>.</p> <table data-svelte-h="svelte-a4bqm9"><thead><tr><th>setup</th> <th>latency</th> <th>speed-up</th></tr></thead> <tbody><tr><td>baseline</td> <td>6.37s</td> <td>x1</td></tr> <tr><td>distilled</td> <td>4.18s</td> <td>x1.52</td></tr> <tr><td>distilled + tiny autoencoder</td> <td>3.83s</td> <td>x1.66</td></tr></tbody></table> <p data-svelte-h="svelte-1jin96g">Let’s load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline
<span class="hljs-keyword">import</span> torch
distilled = StableDiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;nota-ai/bk-sdm-small&quot;</span>, torch_dtype=torch.float16, use_safetensors=<span class="hljs-literal">True</span>,
).to(<span class="hljs-string">&quot;cuda&quot;</span>)
prompt = <span class="hljs-string">&quot;a golden vase with different flowers&quot;</span>
generator = torch.manual_seed(<span class="hljs-number">2023</span>)
image = distilled(<span class="hljs-string">&quot;a golden vase with different flowers&quot;</span>, num_inference_steps=<span class="hljs-number">25</span>, generator=generator).images[<span class="hljs-number">0</span>]
image<!-- HTML_TAG_END --></pre></div> <div class="flex gap-4" data-svelte-h="svelte-1drjdrr"><div><img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/original_sd.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">original Stable Diffusion</figcaption></div> <div><img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion</figcaption></div></div> <h3 class="relative group"><a id="tiny-autoencoder" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tiny-autoencoder"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Tiny AutoEncoder</span></h3> <p data-svelte-h="svelte-1nd97xm">To speed inference up even more, replace the autoencoder with a <a href="https://huggingface.co/sayakpaul/taesdxl-diffusers" rel="nofollow">distilled version</a> of it.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoencoderTiny, StableDiffusionPipeline
distilled = StableDiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;nota-ai/bk-sdm-small&quot;</span>, torch_dtype=torch.float16, use_safetensors=<span class="hljs-literal">True</span>,
).to(<span class="hljs-string">&quot;cuda&quot;</span>)
distilled.vae = AutoencoderTiny.from_pretrained(
<span class="hljs-string">&quot;sayakpaul/taesd-diffusers&quot;</span>, torch_dtype=torch.float16, use_safetensors=<span class="hljs-literal">True</span>,
).to(<span class="hljs-string">&quot;cuda&quot;</span>)
prompt = <span class="hljs-string">&quot;a golden vase with different flowers&quot;</span>
generator = torch.manual_seed(<span class="hljs-number">2023</span>)
image = distilled(<span class="hljs-string">&quot;a golden vase with different flowers&quot;</span>, num_inference_steps=<span class="hljs-number">25</span>, generator=generator).images[<span class="hljs-number">0</span>]
image<!-- HTML_TAG_END --></pre></div> <div class="flex justify-center" data-svelte-h="svelte-5hr8va"><div><img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd_vae.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion + Tiny AutoEncoder</figcaption></div></div> <p data-svelte-h="svelte-aixrxn">More tiny autoencoder models for other Stable Diffusion models, like Stable Diffusion 3, are available from <a href="https://huggingface.co/madebyollin" rel="nofollow">madebyollin</a>.</p> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/diffusers/blob/main/docs/source/en/optimization/fp16.md" target="_blank"><span data-svelte-h="svelte-1kd6by1">&lt;</span> <span data-svelte-h="svelte-x0xyl0">&gt;</span> <span data-svelte-h="svelte-1dajgef"><span class="underline ml-1.5">Update</span> on GitHub</span></a> <p></p>
<script>
{
__sveltekit_1p97lbw = {
assets: "/docs/diffusers/pr_10083/en",
base: "/docs/diffusers/pr_10083/en",
env: {}
};
const element = document.currentScript.parentElement;
const data = [null,null];
Promise.all([
import("/docs/diffusers/pr_10083/en/_app/immutable/entry/start.3ed1a0f4.js"),
import("/docs/diffusers/pr_10083/en/_app/immutable/entry/app.f0e18a17.js")
]).then(([kit, app]) => {
kit.start(app, element, {
node_ids: [0, 177],
data,
form: null,
error: null
});
});
}
</script>

Xet Storage Details

Size:
23.2 kB
·
Xet hash:
df145d86d8973cd35203f4b031b6e1b31e999933bb1d1d10d2cf2018a0f55681

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.