Buckets:

rtrm's picture
download
raw
12.1 kB
import{s as we,n as Ne,o as Ce}from"../chunks/scheduler.8c3d61f6.js";import{S as Le,i as Ee,g as d,s as n,r as l,A as Oe,h as a,f as t,c as i,j as P,u as p,x as D,k as M,y as s,a as c,v as m,d as f,t as h,w as g}from"../chunks/index.da70eac4.js";import{D as G}from"../chunks/Docstring.6b390b9a.js";import{H as be,E as ke}from"../chunks/EditOnGithub.1e64e623.js";function He($e){let _,F,R,J,y,K,T,xe='<code>IPNDMScheduler</code> is a fourth-order Improved Pseudo Linear Multistep scheduler. The original implementation can be found at <a href="https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296" rel="nofollow">crowsonkb/v-diffusion-pytorch</a>.',Q,I,X,o,w,ae,V,Se="A fourth-order Improved Pseudo Linear Multistep scheduler.",ue,A,Pe=`This model inherits from <a href="/docs/diffusers/pr_10175/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a> and <a href="/docs/diffusers/pr_10175/en/api/configuration#diffusers.ConfigMixin">ConfigMixin</a>. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.`,ce,b,N,le,z,De=`Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.`,pe,$,C,me,U,Me="Sets the begin index for the scheduler. This function should be run from pipeline before the inference.",fe,x,L,he,j,ye="Sets the discrete timesteps used for the diffusion chain (to be run before inference).",ge,S,E,_e,q,Te=`Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
the linear multistep method. It performs one forward pass multiple times to approximate the solution.`,Y,O,Z,v,k,ve,B,Ie="Base class for the output of a scheduler’s <code>step</code> function.",ee,H,te,W,se;return y=new be({props:{title:"IPNDMScheduler",local:"ipndmscheduler",headingTag:"h1"}}),I=new be({props:{title:"IPNDMScheduler",local:"diffusers.IPNDMScheduler",headingTag:"h2"}}),w=new G({props:{name:"class diffusers.IPNDMScheduler",anchor:"diffusers.IPNDMScheduler",parameters:[{name:"num_train_timesteps",val:": int = 1000"},{name:"trained_betas",val:": typing.Union[numpy.ndarray, typing.List[float], NoneType] = None"}],parametersDescription:[{anchor:"diffusers.IPNDMScheduler.num_train_timesteps",description:`<strong>num_train_timesteps</strong> (<code>int</code>, defaults to 1000) &#x2014;
The number of diffusion steps to train the model.`,name:"num_train_timesteps"},{anchor:"diffusers.IPNDMScheduler.trained_betas",description:`<strong>trained_betas</strong> (<code>np.ndarray</code>, <em>optional</em>) &#x2014;
Pass an array of betas directly to the constructor to bypass <code>beta_start</code> and <code>beta_end</code>.`,name:"trained_betas"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_ipndm.py#L25"}}),N=new G({props:{name:"scale_model_input",anchor:"diffusers.IPNDMScheduler.scale_model_input",parameters:[{name:"sample",val:": Tensor"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.IPNDMScheduler.scale_model_input.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
The input sample.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_ipndm.py#L196",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>A scaled input sample.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),C=new G({props:{name:"set_begin_index",anchor:"diffusers.IPNDMScheduler.set_begin_index",parameters:[{name:"begin_index",val:": int = 0"}],parametersDescription:[{anchor:"diffusers.IPNDMScheduler.set_begin_index.begin_index",description:`<strong>begin_index</strong> (<code>int</code>) &#x2014;
The begin index for the scheduler.`,name:"begin_index"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_ipndm.py#L76"}}),L=new G({props:{name:"set_timesteps",anchor:"diffusers.IPNDMScheduler.set_timesteps",parameters:[{name:"num_inference_steps",val:": int"},{name:"device",val:": typing.Union[str, torch.device] = None"}],parametersDescription:[{anchor:"diffusers.IPNDMScheduler.set_timesteps.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>) &#x2014;
The number of diffusion steps used when generating samples with a pre-trained model.`,name:"num_inference_steps"},{anchor:"diffusers.IPNDMScheduler.set_timesteps.device",description:`<strong>device</strong> (<code>str</code> or <code>torch.device</code>, <em>optional</em>) &#x2014;
The device to which the timesteps should be moved to. If <code>None</code>, the timesteps are not moved.`,name:"device"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_ipndm.py#L86"}}),E=new G({props:{name:"step",anchor:"diffusers.IPNDMScheduler.step",parameters:[{name:"model_output",val:": Tensor"},{name:"timestep",val:": typing.Union[int, torch.Tensor]"},{name:"sample",val:": Tensor"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.IPNDMScheduler.step.model_output",description:`<strong>model_output</strong> (<code>torch.Tensor</code>) &#x2014;
The direct output from learned diffusion model.`,name:"model_output"},{anchor:"diffusers.IPNDMScheduler.step.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.IPNDMScheduler.step.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"},{anchor:"diffusers.IPNDMScheduler.step.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>) &#x2014;
Whether or not to return a <a href="/docs/diffusers/pr_10175/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput">SchedulerOutput</a> or tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_ipndm.py#L138",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If return_dict is <code>True</code>, <a
href="/docs/diffusers/pr_10175/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput"
>SchedulerOutput</a> is returned, otherwise a
tuple is returned where the first element is the sample tensor.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><a
href="/docs/diffusers/pr_10175/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput"
>SchedulerOutput</a> or <code>tuple</code></p>
`}}),O=new be({props:{title:"SchedulerOutput",local:"diffusers.schedulers.scheduling_utils.SchedulerOutput",headingTag:"h2"}}),k=new G({props:{name:"class diffusers.schedulers.scheduling_utils.SchedulerOutput",anchor:"diffusers.schedulers.scheduling_utils.SchedulerOutput",parameters:[{name:"prev_sample",val:": Tensor"}],parametersDescription:[{anchor:"diffusers.schedulers.scheduling_utils.SchedulerOutput.prev_sample",description:`<strong>prev_sample</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, num_channels, height, width)</code> for images) &#x2014;
Computed sample <code>(x_{t-1})</code> of previous timestep. <code>prev_sample</code> should be used as next model input in the
denoising loop.`,name:"prev_sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10175/src/diffusers/schedulers/scheduling_utils.py#L60"}}),H=new ke({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/schedulers/ipndm.md"}}),{c(){_=d("meta"),F=n(),R=d("p"),J=n(),l(y.$$.fragment),K=n(),T=d("p"),T.innerHTML=xe,Q=n(),l(I.$$.fragment),X=n(),o=d("div"),l(w.$$.fragment),ae=n(),V=d("p"),V.textContent=Se,ue=n(),A=d("p"),A.innerHTML=Pe,ce=n(),b=d("div"),l(N.$$.fragment),le=n(),z=d("p"),z.textContent=De,pe=n(),$=d("div"),l(C.$$.fragment),me=n(),U=d("p"),U.textContent=Me,fe=n(),x=d("div"),l(L.$$.fragment),he=n(),j=d("p"),j.textContent=ye,ge=n(),S=d("div"),l(E.$$.fragment),_e=n(),q=d("p"),q.textContent=Te,Y=n(),l(O.$$.fragment),Z=n(),v=d("div"),l(k.$$.fragment),ve=n(),B=d("p"),B.innerHTML=Ie,ee=n(),l(H.$$.fragment),te=n(),W=d("p"),this.h()},l(e){const r=Oe("svelte-u9bgzb",document.head);_=a(r,"META",{name:!0,content:!0}),r.forEach(t),F=i(e),R=a(e,"P",{}),P(R).forEach(t),J=i(e),p(y.$$.fragment,e),K=i(e),T=a(e,"P",{"data-svelte-h":!0}),D(T)!=="svelte-z0ks0i"&&(T.innerHTML=xe),Q=i(e),p(I.$$.fragment,e),X=i(e),o=a(e,"DIV",{class:!0});var u=P(o);p(w.$$.fragment,u),ae=i(u),V=a(u,"P",{"data-svelte-h":!0}),D(V)!=="svelte-uir5v"&&(V.textContent=Se),ue=i(u),A=a(u,"P",{"data-svelte-h":!0}),D(A)!=="svelte-lcp8x5"&&(A.innerHTML=Pe),ce=i(u),b=a(u,"DIV",{class:!0});var re=P(b);p(N.$$.fragment,re),le=i(re),z=a(re,"P",{"data-svelte-h":!0}),D(z)!=="svelte-1rkfgpx"&&(z.textContent=De),re.forEach(t),pe=i(u),$=a(u,"DIV",{class:!0});var ne=P($);p(C.$$.fragment,ne),me=i(ne),U=a(ne,"P",{"data-svelte-h":!0}),D(U)!=="svelte-1k141rk"&&(U.textContent=Me),ne.forEach(t),fe=i(u),x=a(u,"DIV",{class:!0});var ie=P(x);p(L.$$.fragment,ie),he=i(ie),j=a(ie,"P",{"data-svelte-h":!0}),D(j)!=="svelte-1vzm9q"&&(j.textContent=ye),ie.forEach(t),ge=i(u),S=a(u,"DIV",{class:!0});var oe=P(S);p(E.$$.fragment,oe),_e=i(oe),q=a(oe,"P",{"data-svelte-h":!0}),D(q)!=="svelte-1n4l8et"&&(q.textContent=Te),oe.forEach(t),u.forEach(t),Y=i(e),p(O.$$.fragment,e),Z=i(e),v=a(e,"DIV",{class:!0});var de=P(v);p(k.$$.fragment,de),ve=i(de),B=a(de,"P",{"data-svelte-h":!0}),D(B)!=="svelte-6ojmkw"&&(B.innerHTML=Ie),de.forEach(t),ee=i(e),p(H.$$.fragment,e),te=i(e),W=a(e,"P",{}),P(W).forEach(t),this.h()},h(){M(_,"name","hf:doc:metadata"),M(_,"content",Ve),M(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(o,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,r){s(document.head,_),c(e,F,r),c(e,R,r),c(e,J,r),m(y,e,r),c(e,K,r),c(e,T,r),c(e,Q,r),m(I,e,r),c(e,X,r),c(e,o,r),m(w,o,null),s(o,ae),s(o,V),s(o,ue),s(o,A),s(o,ce),s(o,b),m(N,b,null),s(b,le),s(b,z),s(o,pe),s(o,$),m(C,$,null),s($,me),s($,U),s(o,fe),s(o,x),m(L,x,null),s(x,he),s(x,j),s(o,ge),s(o,S),m(E,S,null),s(S,_e),s(S,q),c(e,Y,r),m(O,e,r),c(e,Z,r),c(e,v,r),m(k,v,null),s(v,ve),s(v,B),c(e,ee,r),m(H,e,r),c(e,te,r),c(e,W,r),se=!0},p:Ne,i(e){se||(f(y.$$.fragment,e),f(I.$$.fragment,e),f(w.$$.fragment,e),f(N.$$.fragment,e),f(C.$$.fragment,e),f(L.$$.fragment,e),f(E.$$.fragment,e),f(O.$$.fragment,e),f(k.$$.fragment,e),f(H.$$.fragment,e),se=!0)},o(e){h(y.$$.fragment,e),h(I.$$.fragment,e),h(w.$$.fragment,e),h(N.$$.fragment,e),h(C.$$.fragment,e),h(L.$$.fragment,e),h(E.$$.fragment,e),h(O.$$.fragment,e),h(k.$$.fragment,e),h(H.$$.fragment,e),se=!1},d(e){e&&(t(F),t(R),t(J),t(K),t(T),t(Q),t(X),t(o),t(Y),t(Z),t(v),t(ee),t(te),t(W)),t(_),g(y,e),g(I,e),g(w),g(N),g(C),g(L),g(E),g(O,e),g(k),g(H,e)}}}const Ve='{"title":"IPNDMScheduler","local":"ipndmscheduler","sections":[{"title":"IPNDMScheduler","local":"diffusers.IPNDMScheduler","sections":[],"depth":2},{"title":"SchedulerOutput","local":"diffusers.schedulers.scheduling_utils.SchedulerOutput","sections":[],"depth":2}],"depth":1}';function Ae($e){return Ce(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Be extends Le{constructor(_){super(),Ee(this,_,Ae,He,we,{})}}export{Be as component};

Xet Storage Details

Size:
12.1 kB
·
Xet hash:
5043a9e92f261278837a7869af892627198cea300154dd2ce487805d66eaa4a7

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.