# CogVideoXDDIMScheduler

`CogVideoXDDIMScheduler` is based on [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) and is tailored specifically to CogVideoX models.
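A minimal sketch of how this scheduler is typically used: swap it into a CogVideoX pipeline via `from_config`. The `THUDM/CogVideoX-2b` checkpoint id is an assumption here; any CogVideoX checkpoint should work the same way.

```python
import torch
from diffusers import CogVideoXPipeline, CogVideoXDDIMScheduler

# Checkpoint id assumed for illustration; any CogVideoX checkpoint works.
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)

# Rebuild the scheduler from the pipeline's existing scheduler config.
pipe.scheduler = CogVideoXDDIMScheduler.from_config(pipe.scheduler.config)
```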
## CogVideoXDDIMScheduler

**class diffusers.CogVideoXDDIMScheduler**(num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = 'scaled_linear', trained_betas: Union[np.ndarray, List[float], None] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = 'epsilon', clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = 'leading', rescale_betas_zero_snr: bool = False, snr_shift_scale: float = 3.0)

`DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with non-Markovian guidance.

This model inherits from `SchedulerMixin` and `ConfigMixin`. Check the superclass documentation for the generic methods the library implements for all schedulers, such as loading and saving.

**Parameters:**

- **num_train_timesteps** (`int`, defaults to 1000) — The number of diffusion steps used to train the model.
- **beta_start** (`float`, defaults to 0.00085) — The starting `beta` value of inference.
- **beta_end** (`float`, defaults to 0.012) — The final `beta` value.
- **beta_schedule** (`str`, defaults to `"scaled_linear"`) — The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- **trained_betas** (`np.ndarray`, *optional*) — Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- **clip_sample** (`bool`, defaults to `True`) — Clip the predicted sample for numerical stability.
- **clip_sample_range** (`float`, defaults to 1.0) — The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
- **set_alpha_to_one** (`bool`, defaults to `True`) — Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True`, the previous alpha product is fixed to `1`; otherwise the alpha value at step 0 is used.
- **steps_offset** (`int`, defaults to 0) — An offset added to the inference steps, as required by some model families.
- **prediction_type** (`str`, defaults to `epsilon`, *optional*) — Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample), or `v_prediction` (see section 2.4 of the [Imagen Video](https://imagen.research.google/video/paper.pdf) paper).
- **thresholding** (`bool`, defaults to `False`) — Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion.
- **dynamic_thresholding_ratio** (`float`, defaults to 0.995) — The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
- **sample_max_value** (`float`, defaults to 1.0) — The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
- **timestep_spacing** (`str`, defaults to `"leading"`) — The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- **rescale_betas_zero_snr** (`bool`, defaults to `False`) — Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
### scale_model_input

**scale_model_input**(sample: torch.Tensor, timestep: Optional[int] = None) → `torch.Tensor`

Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep.

**Parameters:**

- **sample** (`torch.Tensor`) — The input sample.
- **timestep** (`int`, *optional*) — The current timestep in the diffusion chain.

**Returns:** `torch.Tensor` — A scaled input sample.
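A short sketch of the call. Pipelines route the model input through this method before each denoising call so that schedulers which do rescale their inputs stay drop-in compatible; for DDIM-style schedulers the scaling is typically an identity:

```python
import torch
from diffusers import CogVideoXDDIMScheduler

scheduler = CogVideoXDDIMScheduler()
sample = torch.randn(2, 4, 8, 8)  # arbitrary placeholder shape

# For DDIM-style schedulers this is typically an identity transform, but
# calling it keeps pipeline code scheduler-agnostic.
model_input = scheduler.scale_model_input(sample, timestep=999)
```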
### set_timesteps

**set_timesteps**(num_inference_steps: int, device: Union[str, torch.device] = None)

Sets the discrete timesteps used for the diffusion chain (to be run before inference).

**Parameters:**

- **num_inference_steps** (`int`) — The number of diffusion steps used when generating samples with a pre-trained model.
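As a quick illustration, assuming the default config: with 1,000 training steps and `"leading"` spacing, the inference timesteps descend in even strides.

```python
from diffusers import CogVideoXDDIMScheduler

scheduler = CogVideoXDDIMScheduler()
scheduler.set_timesteps(num_inference_steps=4)

# With the default 1000 training steps and "leading" spacing, the chain is
# sampled in even strides, e.g. tensor([750, 500, 250, 0]).
print(scheduler.timesteps)
```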
### step

**step**(model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float = 0.0, use_clipped_model_output: bool = False, generator = None, variance_noise: Optional[torch.Tensor] = None, return_dict: bool = True) → `DDIMSchedulerOutput` or `tuple`

Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise).

**Parameters:**

- **model_output** (`torch.Tensor`) — The direct output from the learned diffusion model.
- **timestep** (`int`) — The current discrete timestep in the diffusion chain.
- **sample** (`torch.Tensor`) — A current instance of a sample created by the diffusion process.
- **eta** (`float`) — The weight of the noise added in a diffusion step.
- **use_clipped_model_output** (`bool`, defaults to `False`) — If `True`, computes a "corrected" `model_output` from the clipped predicted original sample. This is necessary because the predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, the "corrected" `model_output` coincides with the one provided as input and `use_clipped_model_output` has no effect.
- **generator** (`torch.Generator`, *optional*) — A random number generator.
- **variance_noise** (`torch.Tensor`) — An alternative to generating noise with `generator`: directly provide the noise for the variance itself. Useful for methods such as `CycleDiffusion`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `DDIMSchedulerOutput` or a plain `tuple`.

**Returns:** `DDIMSchedulerOutput` or `tuple` — If `return_dict` is `True`, `DDIMSchedulerOutput` is returned; otherwise a tuple is returned where the first element is the sample tensor.
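Putting the three methods together, a bare-bones denoising loop looks like the sketch below. The random `noise_pred` stands in for a real CogVideoX transformer's predicted noise, and the latent shape is a placeholder, not the real CogVideoX layout:

```python
import torch
from diffusers import CogVideoXDDIMScheduler

scheduler = CogVideoXDDIMScheduler()
scheduler.set_timesteps(num_inference_steps=50)

# Start from pure noise; this shape is a placeholder.
sample = torch.randn(1, 8, 4, 32, 32)

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    # A real pipeline would call the CogVideoX transformer here; random noise
    # stands in for the predicted noise (prediction_type="epsilon").
    noise_pred = torch.randn_like(model_input)
    # step() reverses one diffusion step; .prev_sample is the denoised sample.
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```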