Buckets:
| <meta charset="utf-8" /><meta name="hf:doc:metadata" content="{"title":"NVIDIA ModelOpt","local":"nvidia-modelopt","sections":[{"title":"NVIDIAModelOptConfig","local":"nvidiamodeloptconfig","sections":[],"depth":2},{"title":"Supported quantization types","local":"supported-quantization-types","sections":[],"depth":2},{"title":"Serializing and Deserializing quantized models","local":"serializing-and-deserializing-quantized-models","sections":[],"depth":2}],"depth":1}"> | |
| <link href="/docs/diffusers/pr_12411/en/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/entry/start.3e0bd2b6.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/scheduler.53228c21.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/singletons.65de65e1.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/index.e93d0901.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/paths.d8aaea9f.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/entry/app.075ca386.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/preload-helper.a3bbca49.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/index.100fac89.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/nodes/0.64b9a56d.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/each.e59479a4.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/nodes/294.851b58ff.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/CopyLLMTxtMenu.c51e21f1.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/globals.7f7f1b26.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/IconCopy.38cf8f56.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/MermaidChart.svelte_svelte_type_style_lang.5af3dadc.js"> | |
| <link rel="modulepreload" href="/docs/diffusers/pr_12411/en/_app/immutable/chunks/CodeBlock.d30a6509.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{"title":"NVIDIA ModelOpt","local":"nvidia-modelopt","sections":[{"title":"NVIDIAModelOptConfig","local":"nvidiamodeloptconfig","sections":[],"depth":2},{"title":"Supported quantization types","local":"supported-quantization-types","sections":[],"depth":2},{"title":"Serializing and Deserializing quantized models","local":"serializing-and-deserializing-quantized-models","sections":[],"depth":2}],"depth":1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <div class="items-center shrink-0 min-w-[100px] max-sm:min-w-[50px] justify-end ml-auto flex" style="float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"><div class="inline-flex rounded-md max-sm:rounded-sm"><button class="inline-flex items-center gap-1 max-sm:gap-0.5 h-6 max-sm:h-5 px-2 max-sm:px-1.5 text-[11px] max-sm:text-[9px] font-medium text-gray-800 border border-r-0 rounded-l-md max-sm:rounded-l-sm border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-live="polite"><span class="inline-flex items-center justify-center rounded-md p-0.5 max-sm:p-0"><svg class="w-3 h-3 max-sm:w-2.5 max-sm:h-2.5" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg></span> <span>Copy page</span></button> <button class="inline-flex items-center justify-center w-6 max-sm:w-5 h-6 max-sm:h-5 disabled:pointer-events-none text-sm text-gray-500 hover:text-gray-700 
dark:hover:text-white rounded-r-md max-sm:rounded-r-sm border border-l transition border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-haspopup="menu" aria-expanded="false" aria-label="Open copy menu"><svg class="transition-transform text-gray-400 overflow-visible w-3 h-3 max-sm:w-2.5 max-sm:h-2.5 rotate-0" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></button></div> </div> <h1 class="relative group"><a id="nvidia-modelopt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nvidia-modelopt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NVIDIA ModelOpt</span></h1> <p data-svelte-h="svelte-az9epi"><a href="https://github.com/NVIDIA/Model-Optimizer" rel="nofollow">NVIDIA-ModelOpt</a> is a unified library of state-of-the-art model optimization techniques like quantization, pruning, distillation, speculative decoding, etc. 
It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed.</p> <p data-svelte-h="svelte-ieekzb">Before you begin, make sure you have nvidia_modelopt installed.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install -U <span class="hljs-string">"nvidia_modelopt[hf]"</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1l4eedg">Quantize a model by passing <code>NVIDIAModelOptConfig</code> to <a href="/docs/diffusers/pr_12411/en/api/models/overview#diffusers.ModelMixin.from_pretrained">from_pretrained()</a> (you can also load pre-quantized models). 
This works for any model in any modality, as long as it supports loading with <a href="https://hf.co/docs/accelerate/index" rel="nofollow">Accelerate</a> and contains <code>torch.nn.Linear</code> layers.</p> <p data-svelte-h="svelte-1035mg9">The example below only quantizes the weights to FP8.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoModel, SanaPipeline, NVIDIAModelOptConfig | |
| model_id = <span class="hljs-string">"Efficient-Large-Model/Sana_600M_1024px_diffusers"</span> | |
| dtype = torch.bfloat16 | |
| quantization_config = NVIDIAModelOptConfig(quant_type=<span class="hljs-string">"FP8"</span>, quant_method=<span class="hljs-string">"modelopt"</span>) | |
| transformer = AutoModel.from_pretrained( | |
| model_id, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| quantization_config=quantization_config, | |
| torch_dtype=dtype, | |
| ) | |
| pipe = SanaPipeline.from_pretrained( | |
| model_id, | |
| transformer=transformer, | |
| torch_dtype=dtype, | |
| ) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-built_in">print</span>(<span class="hljs-string">f"Pipeline memory usage: <span class="hljs-subst">{torch.cuda.max_memory_reserved() / <span class="hljs-number">1024</span>**<span class="hljs-number">3</span>:<span class="hljs-number">.3</span>f}</span> GB"</span>) | |
| prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| image = pipe( | |
| prompt, num_inference_steps=<span class="hljs-number">50</span>, guidance_scale=<span class="hljs-number">4.5</span>, max_sequence_length=<span class="hljs-number">512</span> | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)<!-- HTML_TAG_END --></pre></div> <blockquote data-svelte-h="svelte-u1gwwd"><p><strong>Note:</strong></p> <p>The quantization methods in NVIDIA-ModelOpt are designed to reduce the memory footprint of model weights using various QAT (Quantization-Aware Training) and PTQ (Post-Training Quantization) techniques while maintaining model performance. However, the actual performance gain during inference depends on the deployment framework (e.g., TRT-LLM, TensorRT) and the specific hardware configuration.</p> <p>More details can be found <a href="https://github.com/NVIDIA/Model-Optimizer/tree/main/examples" rel="nofollow">here</a>.</p></blockquote> <h2 class="relative group"><a id="nvidiamodeloptconfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nvidiamodeloptconfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NVIDIAModelOptConfig</span></h2> <p data-svelte-h="svelte-nf8ias">The <code>NVIDIAModelOptConfig</code> class accepts the following parameters:</p> <ul data-svelte-h="svelte-cct04z"><li><code>quant_type</code>: A string value mentioning one of the quantization types 
below.</li> <li><code>modules_to_not_convert</code>: A list of full or partial module names for which quantization should not be performed. For example, to not perform any quantization of the <a href="/docs/diffusers/pr_12411/en/api/models/sd3_transformer2d#diffusers.SD3Transformer2DModel">SD3Transformer2DModel</a>’s pos_embed projection blocks, one would specify: <code>modules_to_not_convert=["pos_embed.proj.weight"]</code>.</li> <li><code>disable_conv_quantization</code>: A boolean value which when set to <code>True</code> disables quantization for all convolutional layers in the model. This is useful as channel and block quantization generally don’t work well with convolutional layers (used with INT4, NF4, NVFP4). If you want to disable quantization for specific convolutional layers, use <code>modules_to_not_convert</code> instead.</li> <li><code>algorithm</code>: The algorithm to use for determining scale, defaults to <code>"max"</code>. You can check modelopt documentation for more algorithms and details.</li> <li><code>forward_loop</code>: The forward loop function to use for calibrating activation during quantization. 
If not provided, it relies on static scale values computed using the weights only.</li> <li><code>kwargs</code>: A dict of keyword arguments to pass to the underlying quantization method which will be invoked based on <code>quant_type</code>.</li></ul> <h2 class="relative group"><a id="supported-quantization-types" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#supported-quantization-types"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Supported quantization types</span></h2> <p data-svelte-h="svelte-1qozamx">ModelOpt supports weight-only, channel and block quantization int8, fp8, int4, nf4, and nvfp4. The quantization methods are designed to reduce the memory footprint of the model weights while maintaining the performance of the model during inference.</p> <p data-svelte-h="svelte-17x1tdo">Weight-only quantization stores the model weights in a specific low-bit data type but performs computation with a higher-precision data type, like <code>bfloat16</code>. 
This lowers the memory requirements from model weights but retains the memory peaks for activation computation.</p> <p data-svelte-h="svelte-4xexxq">The quantization methods supported are as follows:</p> <table data-svelte-h="svelte-zlixfj"><thead><tr><th><strong>Quantization Type</strong></th> <th><strong>Supported Schemes</strong></th> <th><strong>Required Kwargs</strong></th> <th><strong>Additional Notes</strong></th></tr></thead> <tbody><tr><td><strong>INT8</strong></td> <td><code>int8 weight only</code>, <code>int8 channel quantization</code>, <code>int8 block quantization</code></td> <td><code>quant_type</code>, <code>quant_type + channel_quantize</code>, <code>quant_type + channel_quantize + block_quantize</code></td> <td></td></tr> <tr><td><strong>FP8</strong></td> <td><code>fp8 weight only</code>, <code>fp8 channel quantization</code>, <code>fp8 block quantization</code></td> <td><code>quant_type</code>, <code>quant_type + channel_quantize</code>, <code>quant_type + channel_quantize + block_quantize</code></td> <td></td></tr> <tr><td><strong>INT4</strong></td> <td><code>int4 weight only</code>, <code>int4 block quantization</code></td> <td><code>quant_type</code>, <code>quant_type + channel_quantize + block_quantize</code></td> <td><code>channel_quantize = -1 is only supported for now</code></td></tr> <tr><td><strong>NF4</strong></td> <td><code>nf4 weight only</code>, <code>nf4 double block quantization</code></td> <td><code>quant_type</code>, <code>quant_type + channel_quantize + block_quantize + scale_channel_quantize</code> + <code>scale_block_quantize</code></td> <td><code>channel_quantize = -1 and scale_channel_quantize = -1 are only supported for now</code></td></tr> <tr><td><strong>NVFP4</strong></td> <td><code>nvfp4 weight only</code>, <code>nvfp4 block quantization</code></td> <td><code>quant_type</code>, <code>quant_type + channel_quantize + block_quantize</code></td> <td><code>channel_quantize = -1 is only supported for 
now</code></td></tr></tbody></table> <p data-svelte-h="svelte-fqo6i0">Refer to the <a href="https://nvidia.github.io/Model-Optimizer/" rel="nofollow">official modelopt documentation</a> for a better understanding of the available quantization methods and the exhaustive list of configuration options available.</p> <h2 class="relative group"><a id="serializing-and-deserializing-quantized-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#serializing-and-deserializing-quantized-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Serializing and Deserializing quantized models</span></h2> <p data-svelte-h="svelte-1tei4fm">To serialize a quantized model in a given dtype, first load the model with the desired quantization dtype and then save it using the <a href="/docs/diffusers/pr_12411/en/api/models/overview#diffusers.ModelMixin.save_pretrained">save_pretrained()</a> method.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 
text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoModel, NVIDIAModelOptConfig | |
| <span class="hljs-keyword">from</span> modelopt.torch.opt <span class="hljs-keyword">import</span> enable_huggingface_checkpointing | |
| enable_huggingface_checkpointing() | |
| model_id = <span class="hljs-string">"Efficient-Large-Model/Sana_600M_1024px_diffusers"</span> | |
| quant_config_fp8 = {<span class="hljs-string">"quant_type"</span>: <span class="hljs-string">"FP8"</span>, <span class="hljs-string">"quant_method"</span>: <span class="hljs-string">"modelopt"</span>} | |
| quant_config_fp8 = NVIDIAModelOptConfig(**quant_config_fp8) | |
| model = AutoModel.from_pretrained( | |
| model_id, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| quantization_config=quant_config_fp8, | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| model.save_pretrained(<span class="hljs-string">'path/to/sana_fp8'</span>, safe_serialization=<span class="hljs-literal">False</span>)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1f5kfpw">To load a serialized quantized model, use the <a href="/docs/diffusers/pr_12411/en/api/models/overview#diffusers.ModelMixin.from_pretrained">from_pretrained()</a> method.</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoModel, NVIDIAModelOptConfig, SanaPipeline | |
| <span class="hljs-keyword">from</span> modelopt.torch.opt <span class="hljs-keyword">import</span> enable_huggingface_checkpointing | |
| enable_huggingface_checkpointing() | |
| quantization_config = NVIDIAModelOptConfig(quant_type=<span class="hljs-string">"FP8"</span>, quant_method=<span class="hljs-string">"modelopt"</span>) | |
| transformer = AutoModel.from_pretrained( | |
| <span class="hljs-string">"path/to/sana_fp8"</span>, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| quantization_config=quantization_config, | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| pipe = SanaPipeline.from_pretrained( | |
| <span class="hljs-string">"Efficient-Large-Model/Sana_600M_1024px_diffusers"</span>, | |
| transformer=transformer, | |
| torch_dtype=torch.bfloat16, | |
| ) | |
| pipe.to(<span class="hljs-string">"cuda"</span>) | |
| prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| image = pipe( | |
| prompt, num_inference_steps=<span class="hljs-number">50</span>, guidance_scale=<span class="hljs-number">4.5</span>, max_sequence_length=<span class="hljs-number">512</span> | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)<!-- HTML_TAG_END --></pre></div> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/diffusers/blob/main/docs/source/en/quantization/modelopt.md" target="_blank"><svg class="mr-1" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M31,16l-7,7l-1.41-1.41L28.17,16l-5.58-5.59L24,9l7,7z"></path><path d="M1,16l7-7l1.41,1.41L3.83,16l5.58,5.59L8,23l-7-7z"></path><path d="M12.419,25.484L17.639,6.552l1.932,0.518L14.351,26.002z"></path></svg> <span data-svelte-h="svelte-zjs2n5"><span class="underline">Update</span> on GitHub</span></a> <p></p> | |
| <script> | |
| { | |
| __sveltekit_r19zh6 = { | |
| assets: "/docs/diffusers/pr_12411/en", | |
| base: "/docs/diffusers/pr_12411/en", | |
| env: {} | |
| }; | |
| const element = document.currentScript.parentElement; | |
| const data = [null,null]; | |
| Promise.all([ | |
| import("/docs/diffusers/pr_12411/en/_app/immutable/entry/start.3e0bd2b6.js"), | |
| import("/docs/diffusers/pr_12411/en/_app/immutable/entry/app.075ca386.js") | |
| ]).then(([kit, app]) => { | |
| kit.start(app, element, { | |
| node_ids: [0, 294], | |
| data, | |
| form: null, | |
| error: null | |
| }); | |
| }); | |
| } | |
| </script> | |
Xet Storage Details
- Size:
- 26.6 kB
- Xet hash:
- 4eea6a32accec9eb473242d5b8594171a5be00c0de8b1c86a1ed7bed9df55a59
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.