# Hunyuan-DiT

![chinese elements understanding](https://github.com/gnobitab/diffusers-hunyuan/assets/1157982/39b99036-c3cb-4f16-bb1a-40ec25eda573)

[Hunyuan-DiT: A Powerful Multi-Resolution Diffusion Transformer with Fine-Grained Chinese Understanding](https://huggingface.co/papers/2405.08748) from Tencent Hunyuan.

The abstract from the paper is:

*We present Hunyuan-DiT, a text-to-image diffusion transformer with fine-grained understanding of both English and Chinese. To construct Hunyuan-DiT, we carefully design the transformer structure, text encoder, and positional encoding. We also build from scratch a whole data pipeline to update and evaluate data for iterative model optimization. For fine-grained language understanding, we train a Multimodal Large Language Model to refine the captions of the images. Finally, Hunyuan-DiT can perform multi-turn multimodal dialogue with users, generating and refining images according to the context. Through our holistic human evaluation protocol with more than 50 professional human evaluators, Hunyuan-DiT sets a new state-of-the-art in Chinese-to-image generation compared with other open-source models.*

You can find the original codebase at [Tencent/HunyuanDiT](https://github.com/Tencent/HunyuanDiT) and all the available checkpoints at [Tencent-Hunyuan](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT).

**Highlights**: HunyuanDiT supports Chinese/English-to-image and multi-resolution generation.

HunyuanDiT has the following components (see the sketch after this list):

* It uses a diffusion transformer as the backbone
* It combines two text encoders, a bilingual CLIP and a multilingual T5 encoder
<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

<Tip>

You can further improve generation quality by passing the image generated by `HunyuanDiTPipeline` to the [SDXL refiner](../../using-diffusers/sdxl#base-to-refiner-model) model.

</Tip>

## Optimization

You can optimize the pipeline's runtime and memory consumption with torch.compile and feed-forward chunking. To learn about other optimization methods, check out the [Speed up inference](../../optimization/fp16) and [Reduce memory usage](../../optimization/memory) guides.
### Inference

Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fast_diffusion#torchcompile) to reduce the inference latency.

First, load the pipeline:

```python
import torch
from diffusers import HunyuanDiTPipeline

pipeline = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16
).to("cuda")
```

Then change the memory layout of the pipeline's `transformer` and `vae` components to `torch.channels_last`:

```python
pipeline.transformer.to(memory_format=torch.channels_last)
pipeline.vae.to(memory_format=torch.channels_last)
```

Finally, compile the components and run inference:

```python
pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)

image = pipeline(prompt="一个宇航员在骑马").images[0]  # "An astronaut riding a horse"
```

The [benchmark](https://gist.github.com/sayakpaul/29d3a14905cfcbf611fe71ebd22e9b23) results on an 80GB A100 machine are:

```
With torch.compile(): Average inference time: 12.470 seconds.
Without torch.compile(): Average inference time: 20.570 seconds.
```

### Memory optimization

By loading the T5 text encoder in 8 bits, you can run the pipeline in just under 6 GB of GPU VRAM. Refer to [this script](https://gist.github.com/sayakpaul/3154605f6af05b98a41081aaba5ca43e) for details.
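As a rough sketch of the idea, assuming `bitsandbytes` is installed (the linked script is the reference and stages things more carefully, encoding the prompt before freeing memory for the transformer):

```python
# Sketch: quantize only the memory-hungry mT5 encoder to 8 bits.
import torch
from transformers import BitsAndBytesConfig, T5EncoderModel
from diffusers import HunyuanDiTPipeline

text_encoder_2 = T5EncoderModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    subfolder="text_encoder_2",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
pipe = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers",
    text_encoder_2=text_encoder_2,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()  # keep only the active component on the GPU

image = pipe(prompt="一个宇航员在骑马").images[0]
```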
Furthermore, you can use the [enable_forward_chunking()](/docs/diffusers/pr_11105/en/api/models/hunyuan_transformer2d#diffusers.HunyuanDiT2DModel.enable_forward_chunking) method to reduce memory usage. Feed-forward chunking runs the feed-forward layers in a transformer block in a loop instead of all at once. This gives you a trade-off between memory consumption and inference runtime.

```diff
+ pipeline.transformer.enable_forward_chunking(chunk_size=1, dim=1)
```

## HunyuanDiTPipeline

### class diffusers.HunyuanDiTPipeline

( vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, text_encoder_2: Optional[T5EncoderModel] = None, tokenizer_2: Optional[MT5Tokenizer] = None )

Pipeline for English/Chinese-to-image generation using HunyuanDiT.

This model inherits from [DiffusionPipeline](/docs/diffusers/pr_11105/en/api/pipelines/overview#diffusers.DiffusionPipeline). Check the superclass documentation for the generic methods the library implements for all pipelines (such as downloading or saving, or running on a particular device).

HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and a bilingual CLIP fine-tuned by the Hunyuan team.

**Parameters:**

* **vae** ([AutoencoderKL](/docs/diffusers/pr_11105/en/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. We use `sdxl-vae-fp16-fix`.
* **text_encoder** (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]) — Frozen text encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). HunyuanDiT uses a fine-tuned bilingual CLIP.
* **tokenizer** (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]) — A `BertTokenizer` or `CLIPTokenizer` to tokenize text.
* **transformer** ([HunyuanDiT2DModel](/docs/diffusers/pr_11105/en/api/models/hunyuan_transformer2d#diffusers.HunyuanDiT2DModel)) — The HunyuanDiT model designed by Tencent Hunyuan.
* **text_encoder_2** (`T5EncoderModel`) — The mT5 embedder; specifically, `t5-v1_1-xxl`.
* **tokenizer_2** (`MT5Tokenizer`) — The tokenizer for the mT5 embedder.
* **scheduler** ([DDPMScheduler](/docs/diffusers/pr_11105/en/api/schedulers/ddpm#diffusers.DDPMScheduler)) — A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents.
#### __call__

( prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 5.0, negative_prompt: Union[str, List[str], None] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Union[torch.Generator, List[torch.Generator], None] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_embeds_2: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds_2: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, prompt_attention_mask_2: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, output_type: Optional[str] = 'pil', return_dict: bool = True, callback_on_step_end: Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks, None] = None, callback_on_step_end_tensor_inputs: List[str] = ['latents'], guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = (1024, 1024), target_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), use_resolution_binning: bool = True )

The call function to the pipeline for generation with HunyuanDiT.

**Parameters:**

* **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
* **height** (`int`) — The height in pixels of the generated image.
* **width** (`int`) — The width in pixels of the generated image.
* **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
* **guidance_scale** (`float`, *optional*, defaults to 5.0) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
* **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
* **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
* **eta** (`float`, *optional*, defaults to 0.0) — Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [DDIMScheduler](/docs/diffusers/pr_11105/en/api/schedulers/ddim#diffusers.DDIMScheduler), and is ignored in other schedulers.
* **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
* **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings for the CLIP encoder. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
* **prompt_embeds_2** (`torch.Tensor`, *optional*) — Pre-generated text embeddings for the T5 encoder. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
* **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings for the CLIP encoder. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
* **negative_prompt_embeds_2** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings for the T5 encoder. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds_2` are generated from the `negative_prompt` input argument.
* **prompt_attention_mask** (`torch.Tensor`, *optional*) — Attention mask for the prompt. Required when `prompt_embeds` is passed directly.
* **prompt_attention_mask_2** (`torch.Tensor`, *optional*) — Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly.
* **negative_prompt_attention_mask** (`torch.Tensor`, *optional*) — Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly.
* **negative_prompt_attention_mask_2** (`torch.Tensor`, *optional*) — Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly.
* **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
* **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a [StableDiffusionPipelineOutput](/docs/diffusers/pr_11105/en/api/pipelines/stable_diffusion/gligen#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) instead of a plain tuple.
* **callback_on_step_end** (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*) — A callback function or a list of callback functions called at the end of each denoising step.
* **callback_on_step_end_tensor_inputs** (`List[str]`, *optional*) — A list of tensor inputs that should be passed to the callback function. If not defined, all tensor inputs are passed.
* **guidance_rescale** (`float`, *optional*, defaults to 0.0) — Rescale the noise prediction according to `guidance_rescale`. Based on the findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891); see Section 3.4.
* **original_size** (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`) — The original size of the image. Used to calculate the time ids.
* **target_size** (`Tuple[int, int]`, *optional*) — The target size of the image. Used to calculate the time ids.
* **crops_coords_top_left** (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`) — The top-left coordinates of the crop. Used to calculate the time ids.
* **use_resolution_binning** (`bool`, *optional*, defaults to `True`) — Whether to use resolution binning. If `True`, the input resolution is mapped to the closest standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`.
**Returns:** [StableDiffusionPipelineOutput](/docs/diffusers/pr_11105/en/api/pipelines/stable_diffusion/gligen#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) or `tuple`

If `return_dict` is `True`, a [StableDiffusionPipelineOutput](/docs/diffusers/pr_11105/en/api/pipelines/stable_diffusion/gligen#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) is returned; otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.

Examples:

```python
import torch
from diffusers import HunyuanDiTPipeline

pipe = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16
)
pipe.to("cuda")

# You may also use an English prompt, as HunyuanDiT supports both English and Chinese
# prompt = "An astronaut riding a horse"
prompt = "一个宇航员在骑马"
image = pipe(prompt).images[0]
```
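To illustrate the `callback_on_step_end` parameters, here is a hypothetical sketch of a progress-logging callback, continuing from the example above. The step-end callback protocol assumed here (receive the pipeline, step index, timestep, and a dict of the requested tensors, then return that dict) is the general diffusers convention rather than something shown on this page.

```python
# Sketch: a hypothetical callback that logs each denoising step.
def log_step(pipe, step_index, timestep, callback_kwargs):
    # callback_kwargs holds the tensors named in
    # callback_on_step_end_tensor_inputs (defaults to ["latents"]).
    latents = callback_kwargs["latents"]
    print(f"step {step_index}: t={int(timestep)}, latents shape={tuple(latents.shape)}")
    return callback_kwargs

image = pipe(
    prompt,
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```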
#### encode_prompt

( prompt: str, device: torch.device = None, dtype: torch.dtype = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: Optional[int] = None, text_encoder_index: int = 0 )

Encodes the prompt into text encoder hidden states.

**Parameters:**

* **prompt** (`str` or `List[str]`, *optional*) — The prompt to be encoded.
* **device** (`torch.device`) — The torch device to place the resulting embeddings on.
* **dtype** (`torch.dtype`) — The torch dtype of the resulting embeddings.
* **num_images_per_prompt** (`int`) — The number of images that should be generated per prompt.
* **do_classifier_free_guidance** (`bool`) — Whether to use classifier-free guidance or not.
* **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than 1).
* **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
* **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
* **prompt_attention_mask** (`torch.Tensor`, *optional*) — Attention mask for the prompt. Required when `prompt_embeds` is passed directly.
* **negative_prompt_attention_mask** (`torch.Tensor`, *optional*) — Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly.
* **max_sequence_length** (`int`, *optional*) — Maximum sequence length to use for the prompt.
* **text_encoder_index** (`int`, *optional*) — Index of the text encoder to use: `0` for CLIP and `1` for T5.
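Because `__call__` accepts pre-computed embeddings for both encoders, `encode_prompt` can be used to encode once and reuse the result across generations. A hypothetical sketch, continuing from the example above; the four-tuple return order (embeddings, negative embeddings, attention mask, negative attention mask) is an assumption worth verifying against the pipeline source:

```python
# Sketch: pre-compute embeddings for both encoders, then reuse them.
import torch

prompt = "一个宇航员在骑马"
emb, neg_emb, mask, neg_mask = pipe.encode_prompt(prompt, text_encoder_index=0)      # bilingual CLIP
emb2, neg_emb2, mask2, neg_mask2 = pipe.encode_prompt(prompt, text_encoder_index=1)  # mT5

for seed in (0, 1):
    image = pipe(
        prompt=None,
        prompt_embeds=emb, negative_prompt_embeds=neg_emb,
        prompt_attention_mask=mask, negative_prompt_attention_mask=neg_mask,
        prompt_embeds_2=emb2, negative_prompt_embeds_2=neg_emb2,
        prompt_attention_mask_2=mask2, negative_prompt_attention_mask_2=neg_mask2,
        generator=torch.Generator("cuda").manual_seed(seed),
    ).images[0]
```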