# DDIMInverseScheduler

`DDIMInverseScheduler` is the inverted scheduler from [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon.
The implementation is mostly based on the DDIM inversion definition from [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://huggingface.co/papers/2211.09794).
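A common use of the inverse scheduler is to map a real image back to the noise that regenerates it, so the regular `DDIMScheduler` can then denoise from that latent. The sketch below is a minimal, illustrative pairing; the checkpoint name and the pipeline class are assumptions for the example, not requirements of this scheduler:

```python
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionPipeline

# Illustrative checkpoint; any DDIM-compatible pipeline config works the same way.
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Build the inverse scheduler from the pipeline's existing scheduler config so both
# directions share the same beta schedule and timestep spacing.
inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)

# The denoising direction keeps using a regular DDIMScheduler.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
```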
## DDIMInverseScheduler

### class diffusers.DDIMInverseScheduler

[source](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddim_inverse.py#L130)

`( num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = 'linear', trained_betas: Union = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = 'epsilon', clip_sample_range: float = 1.0, timestep_spacing: str = 'leading', rescale_betas_zero_snr: bool = False, **kwargs )`

`DDIMInverseScheduler` is the reverse scheduler of [DDIMScheduler](/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler).

This model inherits from [SchedulerMixin](/docs/diffusers/main/en/api/schedulers/overview#diffusers.SchedulerMixin) and [ConfigMixin](/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin). Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving.

**Parameters**

- **num_train_timesteps** (`int`, defaults to 1000) — The number of diffusion steps to train the model.
- **beta_start** (`float`, defaults to 0.0001) — The starting `beta` value of inference.
- **beta_end** (`float`, defaults to 0.02) — The final `beta` value.
- **beta_schedule** (`str`, defaults to `"linear"`) — The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- **trained_betas** (`np.ndarray`, *optional*) — Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- **clip_sample** (`bool`, defaults to `True`) — Clip the predicted sample for numerical stability.
- **clip_sample_range** (`float`, defaults to 1.0) — The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
- **set_alpha_to_one** (`bool`, defaults to `True`) — Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to 0, otherwise it uses the alpha value at step `num_train_timesteps - 1`.
- **steps_offset** (`int`, defaults to 0) — An offset added to the inference steps, as required by some model families.
- **prediction_type** (`str`, defaults to `epsilon`, *optional*) — Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample), or `v_prediction` (see section 2.4 of the [Imagen Video](https://imagen.research.google/video/paper.pdf) paper).
- **timestep_spacing** (`str`, defaults to `"leading"`) — The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- **rescale_betas_zero_snr** (`bool`, defaults to `False`) — Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
### scale_model_input

[source](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddim_inverse.py#L234)

`( sample: Tensor, timestep: Optional = None ) → torch.Tensor`

Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep.

**Parameters**

- **sample** (`torch.Tensor`) — The input sample.
- **timestep** (`int`, *optional*) — The current timestep in the diffusion chain.

**Returns**: `torch.Tensor` — A scaled input sample.
### set_timesteps

[source](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddim_inverse.py#L251)

`( num_inference_steps: int, device: Union = None )`

Sets the discrete timesteps used for the diffusion chain (to be run before inference).

**Parameters**

- **num_inference_steps** (`int`) — The number of diffusion steps used when generating samples with a pre-trained model.
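Continuing the earlier sketch, the schedule is prepared once before stepping; `inverse_scheduler` is the instance created above:

```python
inverse_scheduler.set_timesteps(num_inference_steps=50)
print(inverse_scheduler.timesteps)  # the discrete timesteps the inversion loop iterates over
```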
### step

[source](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddim_inverse.py#L289)

`( model_output: Tensor, timestep: int, sample: Tensor, return_dict: bool = True ) → ~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput or tuple`

Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise).

**Parameters**

- **model_output** (`torch.Tensor`) — The direct output from the learned diffusion model.
- **timestep** (`float`) — The current discrete timestep in the diffusion chain.
- **sample** (`torch.Tensor`) — A current instance of a sample created by the diffusion process.
- **eta** (`float`) — The weight of noise for added noise in the diffusion step.
- **use_clipped_model_output** (`bool`, defaults to `False`) — If `True`, computes the "corrected" `model_output` from the clipped predicted original sample. Necessary because the predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, the "corrected" `model_output` coincides with the one provided as input and `use_clipped_model_output` has no effect.
- **variance_noise** (`torch.Tensor`) — Alternative to generating noise with `generator` by directly providing the noise for the variance itself. Useful for methods such as `CycleDiffusion`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput` or a plain `tuple`.

**Returns**: `~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput` or `tuple` — If `return_dict` is `True`, a `~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput` is returned; otherwise a tuple is returned where the first element is the sample tensor.
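Putting the pieces together, a minimal inversion loop sketch; `unet` (a conditional UNet), `latents` (the VAE-encoded clean image), and `prompt_embeds` (text-encoder output) are assumed to come from a surrounding pipeline and are not provided by this scheduler:

```python
import torch

inverse_scheduler.set_timesteps(num_inference_steps=50)

for t in inverse_scheduler.timesteps:
    # Pass-through for DDIM-style schedulers, kept for API interchangeability.
    model_input = inverse_scheduler.scale_model_input(latents, t)
    with torch.no_grad():
        noise_pred = unet(model_input, t, encoder_hidden_states=prompt_embeds).sample
    # step() runs the diffusion process in the inverse direction, turning the clean
    # latents into progressively noisier latents.
    latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample

# `latents` now approximates the noise that, when denoised with DDIMScheduler and the
# same prompt, reconstructs the original image.
```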