# DDPMScheduler

[Denoising Diffusion Probabilistic Models](https://huggingface.co/papers/2006.11239) (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes a diffusion-based model of the same name. In the context of the 🤗 Diffusers library, DDPM refers to the discrete denoising scheduler from the paper as well as the pipeline.

The abstract from the paper is:

*We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at [this https URL](https://github.com/hojonathanho/diffusion).*
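Before the API reference, here is a minimal sketch of unconditional image generation with the DDPM pipeline (the checkpoint id is only an example; any DDPM checkpoint on the Hub with the same layout should work):

```python
from diffusers import DDPMPipeline

# Example checkpoint trained on cat images; swap in any DDPM checkpoint.
pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

# DDPM sampling walks the full discrete chain, so 1000 steps is the usual default.
image = pipeline(num_inference_steps=1000).images[0]
image.save("ddpm_generated_image.png")
```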
## DDPMScheduler

`class diffusers.DDPMScheduler(num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = 'linear', trained_betas: Union[np.ndarray, List[float], None] = None, variance_type: str = 'fixed_small', clip_sample: bool = True, prediction_type: str = 'epsilon', thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = 'leading', steps_offset: int = 0, rescale_betas_zero_snr: bool = False)` — [source](https://github.com/huggingface/diffusers/blob/vr_10312/src/diffusers/schedulers/scheduling_ddpm.py#L129)

`DDPMScheduler` explores the connections between denoising score matching and Langevin dynamics sampling.

This model inherits from [SchedulerMixin](/docs/diffusers/pr_10312/en/api/schedulers/overview#diffusers.SchedulerMixin) and [ConfigMixin](/docs/diffusers/pr_10312/en/api/configuration#diffusers.ConfigMixin). Check the superclass documentation for the generic methods the library implements for all schedulers, such as loading and saving.
**Parameters**

- **num_train_timesteps** (`int`, defaults to 1000) — The number of diffusion steps used to train the model.
- **beta_start** (`float`, defaults to 0.0001) — The starting `beta` value of inference.
- **beta_end** (`float`, defaults to 0.02) — The final `beta` value.
- **beta_schedule** (`str`, defaults to `"linear"`) — The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- **trained_betas** (`np.ndarray`, *optional*) — An array of betas to pass directly to the constructor without using `beta_start` and `beta_end`.
- **variance_type** (`str`, defaults to `"fixed_small"`) — Clips the variance when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned`, or `learned_range`.
- **clip_sample** (`bool`, defaults to `True`) — Clip the predicted sample for numerical stability.
- **clip_sample_range** (`float`, defaults to 1.0) — The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
- **prediction_type** (`str`, defaults to `epsilon`, *optional*) — Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample), or `v_prediction` (see section 2.4 of the [Imagen Video](https://imagen.research.google/video/paper.pdf) paper).
- **thresholding** (`bool`, defaults to `False`) — Whether to use the “dynamic thresholding” method. This is unsuitable for latent-space diffusion models such as Stable Diffusion.
- **dynamic_thresholding_ratio** (`float`, defaults to 0.995) — The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
- **sample_max_value** (`float`, defaults to 1.0) — The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
- **timestep_spacing** (`str`, defaults to `"leading"`) — The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- **steps_offset** (`int`, defaults to 0) — An offset added to the inference steps, as required by some model families.
- **rescale_betas_zero_snr** (`bool`, defaults to `False`) — Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
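As an illustration of the options above, a minimal sketch of constructing the scheduler directly or loading its configuration from a pretrained checkpoint (the repository id is an example):

```python
from diffusers import DDPMScheduler

# Construct from scratch with a cosine beta schedule, v-prediction, and
# zero-terminal-SNR rescaling; all other parameters keep their defaults.
scheduler = DDPMScheduler(
    num_train_timesteps=1000,
    beta_schedule="squaredcos_cap_v2",
    prediction_type="v_prediction",
    rescale_betas_zero_snr=True,
)

# Or reuse the configuration stored alongside a pretrained model.
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256", subfolder="scheduler")
```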
### scale_model_input

`scale_model_input(sample: torch.Tensor, timestep: Optional[int] = None) → torch.Tensor` — [source](https://github.com/huggingface/diffusers/blob/vr_10312/src/diffusers/schedulers/scheduling_ddpm.py#L234)

Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep.

**Parameters**

- **sample** (`torch.Tensor`) — The input sample.
- **timestep** (`int`, *optional*) — The current timestep in the diffusion chain.

**Returns**

`torch.Tensor` — A scaled input sample.
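Calling this method before every model forward pass keeps a sampling loop interchangeable with schedulers that do rescale their input; for `DDPMScheduler` it is expected to be a pass-through. A sketch, assuming `scheduler`, `unet`, `sample`, and `t` are already defined:

```python
# Keep the loop scheduler-agnostic; for DDPMScheduler the sample is
# expected to come back unchanged.
model_input = scheduler.scale_model_input(sample, timestep=t)
noise_pred = unet(model_input, t).sample
```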
### set_timesteps

`set_timesteps(num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, timesteps: Optional[List[int]] = None)` — [source](https://github.com/huggingface/diffusers/blob/vr_10312/src/diffusers/schedulers/scheduling_ddpm.py#L251)

Sets the discrete timesteps used for the diffusion chain (to be run before inference).

**Parameters**

- **num_inference_steps** (`int`) — The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`.
- **device** (`str` or `torch.device`, *optional*) — The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
- **timesteps** (`List[int]`, *optional*) — Custom timesteps used to support arbitrary spacing between timesteps. If `None`, the default strategy of equal spacing between timesteps is used. If `timesteps` is passed, `num_inference_steps` must be `None`.
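A minimal sketch of preparing the timestep schedule before sampling; the step count and the explicit timestep list are purely illustrative:

```python
# Equally spaced schedule derived from the training timesteps.
scheduler.set_timesteps(num_inference_steps=50)
print(scheduler.timesteps[:5])  # the first (largest) timesteps of the schedule

# Alternatively, pass an explicit, strictly decreasing list of timesteps.
scheduler.set_timesteps(timesteps=[999, 749, 499, 249, 0])
```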
### step

`step(model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool = True) → DDPMSchedulerOutput or tuple` — [source](https://github.com/huggingface/diffusers/blob/vr_10312/src/diffusers/schedulers/scheduling_ddpm.py#L399)

Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise).

**Parameters**

- **model_output** (`torch.Tensor`) — The direct output from the learned diffusion model.
- **timestep** (`int`) — The current discrete timestep in the diffusion chain.
- **sample** (`torch.Tensor`) — A current instance of a sample created by the diffusion process.
- **generator** (`torch.Generator`, *optional*) — A random number generator.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a [DDPMSchedulerOutput](/docs/diffusers/pr_10312/en/api/schedulers/ddpm#diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput) or `tuple`.

**Returns**

[DDPMSchedulerOutput](/docs/diffusers/pr_10312/en/api/schedulers/ddpm#diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput) or `tuple` — If `return_dict` is `True`, a [DDPMSchedulerOutput](/docs/diffusers/pr_10312/en/api/schedulers/ddpm#diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput) is returned, otherwise a tuple is returned where the first element is the sample tensor.
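Putting `set_timesteps` and `step` together, a minimal sketch of a manual denoising loop with a pretrained UNet (the repository id is an example; `DDPMPipeline` is the maintained equivalent of this loop):

```python
import torch
from diffusers import DDPMScheduler, UNet2DModel

repo_id = "google/ddpm-cat-256"  # example checkpoint with "unet" and "scheduler" subfolders
unet = UNet2DModel.from_pretrained(repo_id, subfolder="unet")
scheduler = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler")

scheduler.set_timesteps(num_inference_steps=1000)
generator = torch.manual_seed(0)

# Start from pure Gaussian noise and denoise it step by step.
sample = torch.randn(1, unet.config.in_channels, unet.config.sample_size, unet.config.sample_size)

for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = unet(sample, t).sample  # the model predicts the added noise (epsilon)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample  # x_{t-1}
```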
## DDPMSchedulerOutput

`class diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput(prev_sample: torch.Tensor, pred_original_sample: Optional[torch.Tensor] = None)` — [source](https://github.com/huggingface/diffusers/blob/vr_10312/src/diffusers/schedulers/scheduling_ddpm.py#L30)

Output class for the scheduler’s `step` function output.

**Parameters**

- **prev_sample** (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images) — Computed sample `x_{t-1}` of the previous timestep. `prev_sample` should be used as the next model input in the denoising loop.
- **pred_original_sample** (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images) — The predicted denoised sample `x_{0}` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance.
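Inside a loop like the one above, both fields are available on the returned object (a sketch; `scheduler`, `noise_pred`, `t`, and `sample` are assumed from the previous example):

```python
output = scheduler.step(noise_pred, t, sample)
next_sample = output.prev_sample        # next model input, x_{t-1}
preview = output.pred_original_sample   # current estimate of the clean sample, x_0

# The same call with return_dict=False returns a plain tuple whose first
# element is the previous sample tensor.
next_sample = scheduler.step(noise_pred, t, sample, return_dict=False)[0]
```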
mt-8")},m(e,s){r(document.head,p),i(e,G,s),i(e,U,s),i(e,J,s),g($,e,s),i(e,B,s),i(e,P,s),i(e,K,s),i(e,M,s),i(e,Q,s),i(e,S,s),i(e,X,s),g(T,e,s),i(e,Y,s),i(e,d,s),g(w,d,null),r(d,ae),r(d,A),r(d,ce),r(d,z),r(d,le),r(d,v),g(C,v,null),r(v,pe),r(v,V),r(d,ue),r(d,x),g(L,x,null),r(x,me),r(x,F),r(d,fe),r(d,y),g(O,y,null),r(y,he),r(y,k),i(e,Z,s),g(N,e,s),i(e,ee,s),i(e,u,s),g(q,u,null),r(u,ge),r(u,R),i(e,te,s),g(I,e,s),i(e,se,s),i(e,W,s),ne=!0},p:Ce,i(e){ne||(_($.$$.fragment,e),_(T.$$.fragment,e),_(w.$$.fragment,e),_(C.$$.fragment,e),_(L.$$.fragment,e),_(O.$$.fragment,e),_(N.$$.fragment,e),_(q.$$.fragment,e),_(I.$$.fragment,e),ne=!0)},o(e){b($.$$.fragment,e),b(T.$$.fragment,e),b(w.$$.fragment,e),b(C.$$.fragment,e),b(L.$$.fragment,e),b(O.$$.fragment,e),b(N.$$.fragment,e),b(q.$$.fragment,e),b(I.$$.fragment,e),ne=!1},d(e){e&&(t(G),t(U),t(J),t(B),t(P),t(K),t(M),t(Q),t(S),t(X),t(Y),t(d),t(Z),t(ee),t(u),t(te),t(se),t(W)),t(p),D($,e),D(T,e),D(w),D(C),D(L),D(O),D(N,e),D(q),D(I,e)}}}const He='{"title":"DDPMScheduler","local":"ddpmscheduler","sections":[{"title":"DDPMScheduler","local":"diffusers.DDPMScheduler","sections":[],"depth":2},{"title":"DDPMSchedulerOutput","local":"diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput","sections":[],"depth":2}],"depth":1}';function Ae(be){return Le(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Re extends Oe{constructor(p){super(),Ne(this,p,Ae,Ee,we,{})}}export{Re as component}; | |