Buckets:

rtrm's picture
download
raw
7.67 kB
// Auto-generated, minified SvelteKit client bundle for the Hugging Face
// diffusers documentation page "ConsistencyDecoderScheduler". Do NOT edit by
// hand — this file is produced by the docs build from
// docs/source/en/api/schedulers/consistency_decoder.md (see the EditOnGithub
// source URL embedded below).
//
// Structure, as visible in the code:
//  - le(X): the compiled page fragment. Builds two heading components (`se`),
//    two Docstring components (`Q`) describing `scale_model_input` and `step`
//    of diffusers' ConsistencyDecoderScheduler, and an EditOnGithub link
//    (`ae`). Implements the standard Svelte fragment hooks:
//    c (create), l (claim/hydrate), h (set attributes), m (mount),
//    p (update), i (intro/transition-in), o (outro), d (destroy).
//  - ue: the serialized 'hf:doc:metadata' JSON written into a <meta> tag in
//    document.head by the h()/m() hooks.
//  - pe(X): the component instance script — on mount it reads the "fw" query
//    parameter from window.location.search (value unused here).
//  - _e: the SvelteComponent subclass wired to (pe, le) and exported as
//    `component` for the SvelteKit router.
//
// NOTE(review): near the end of h() the class string for `c` appears split
// across two physical lines ("... mb-6 " / "mt-8"). A raw newline inside a
// double-quoted JS string literal is a syntax error, so this is presumably a
// line-wrap artifact of how this file was extracted/displayed — verify
// against the original bundle before treating it as a real defect.
import{s as ne,n as re,o as oe}from"../chunks/scheduler.8c3d61f6.js";import{S as ce,i as de,g as d,s as r,r as C,A as ie,h as i,f as t,c as o,j as H,u as T,x as W,k as M,y as l,a as n,v as S,d as w,t as E,w as P}from"../chunks/index.da70eac4.js";import{D as Q}from"../chunks/Docstring.6b390b9a.js";import{H as se,E as ae}from"../chunks/EditOnGithub.1e64e623.js";function le(X){let a,O,L,I,m,G,h,Y='This scheduler is a part of the <code>ConsistencyDecoderPipeline</code> and was introduced in <a href="https://openai.com/dall-e-3" rel="nofollow">DALL-E 3</a>.',V,f,Z='The original codebase can be found at <a href="https://github.com/openai/consistency_models" rel="nofollow">openai/consistency_models</a>.',k,g,z,c,_,B,u,y,F,v,ee=`Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.`,J,p,$,K,D,te=`Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).`,N,b,U,A,j;return m=new se({props:{title:"ConsistencyDecoderScheduler",local:"consistencydecoderscheduler",headingTag:"h1"}}),g=new se({props:{title:"ConsistencyDecoderScheduler",local:"diffusers.schedulers.ConsistencyDecoderScheduler",headingTag:"h2"}}),_=new Q({props:{name:"class diffusers.schedulers.ConsistencyDecoderScheduler",anchor:"diffusers.schedulers.ConsistencyDecoderScheduler",parameters:[{name:"num_train_timesteps",val:": int = 1024"},{name:"sigma_data",val:": float = 0.5"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_consistency_decoder.py#L72"}}),y=new Q({props:{name:"scale_model_input",anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.scale_model_input",parameters:[{name:"sample",val:": Tensor"},{name:"timestep",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.scale_model_input.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
The input sample.`,name:"sample"},{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.scale_model_input.timestep",description:`<strong>timestep</strong> (<code>int</code>, <em>optional</em>) &#x2014;
The current timestep in the diffusion chain.`,name:"timestep"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_consistency_decoder.py#L116",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>A scaled input sample.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),$=new Q({props:{name:"step",anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step",parameters:[{name:"model_output",val:": Tensor"},{name:"timestep",val:": typing.Union[float, torch.Tensor]"},{name:"sample",val:": Tensor"},{name:"generator",val:": typing.Optional[torch._C.Generator] = None"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step.model_output",description:`<strong>model_output</strong> (<code>torch.Tensor</code>) &#x2014;
The direct output from the learned diffusion model.`,name:"model_output"},{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step.timestep",description:`<strong>timestep</strong> (<code>float</code>) &#x2014;
The current timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"},{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) &#x2014;
A random number generator.`,name:"generator"},{anchor:"diffusers.schedulers.ConsistencyDecoderScheduler.step.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a
<code>~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput</code> or <code>tuple</code>.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_consistency_decoder.py#L133",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If return_dict is <code>True</code>,
<code>~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput</code> is returned, otherwise
a tuple is returned where the first element is the sample tensor.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput</code> or <code>tuple</code></p>
`}}),b=new ae({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/schedulers/consistency_decoder.md"}}),{c(){a=d("meta"),O=r(),L=d("p"),I=r(),C(m.$$.fragment),G=r(),h=d("p"),V=r(),f=d("p"),f.innerHTML=Z,k=r(),C(g.$$.fragment),z=r(),c=d("div"),C(_.$$.fragment),B=r(),u=d("div"),C(y.$$.fragment),F=r(),v=d("p"),v.textContent=ee,J=r(),p=d("div"),C($.$$.fragment),K=r(),D=d("p"),D.textContent=te,N=r(),C(b.$$.fragment),U=r(),A=d("p"),this.h()},l(e){const s=ie("svelte-u9bgzb",document.head);a=i(s,"META",{name:!0,content:!0}),s.forEach(t),O=o(e),L=i(e,"P",{}),H(L).forEach(t),I=o(e),T(m.$$.fragment,e),G=o(e),h=i(e,"P",{"data-svelte-h":!0}),W(h)!=="svelte-c0tz5b"&&(h.innerHTML=Y),V=o(e),f=i(e,"P",{"data-svelte-h":!0}),W(f)!=="svelte-1ci2nab"&&(f.innerHTML=Z),k=o(e),T(g.$$.fragment,e),z=o(e),c=i(e,"DIV",{class:!0});var x=H(c);T(_.$$.fragment,x),B=o(x),u=i(x,"DIV",{class:!0});var q=H(u);T(y.$$.fragment,q),F=o(q),v=i(q,"P",{"data-svelte-h":!0}),W(v)!=="svelte-1rkfgpx"&&(v.textContent=ee),q.forEach(t),J=o(x),p=i(x,"DIV",{class:!0});var R=H(p);T($.$$.fragment,R),K=o(R),D=i(R,"P",{"data-svelte-h":!0}),W(D)!=="svelte-hi84tp"&&(D.textContent=te),R.forEach(t),x.forEach(t),N=o(e),T(b.$$.fragment,e),U=o(e),A=i(e,"P",{}),H(A).forEach(t),this.h()},h(){M(a,"name","hf:doc:metadata"),M(a,"content",ue),M(u,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(p,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(c,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,s){l(document.head,a),n(e,O,s),n(e,L,s),n(e,I,s),S(m,e,s),n(e,G,s),n(e,h,s),n(e,V,s),n(e,f,s),n(e,k,s),S(g,e,s),n(e,z,s),n(e,c,s),S(_,c,null),l(c,B),l(c,u),S(y,u,null),l(u,F),l(u,v),l(c,J),l(c,p),S($,p,null),l(p,K),l(p,D),n(e,N,s),S(b,e,s),n(e,U,s),n(e,A,s),j=!0},p:re,i(e){j||(w(m.$$.fragment,e),w(g.$$.fragment,e),w(_.$$.fragment,e),w(y.$$.fragment,e),w($.$$.fragment,e),w(b.$$.fragment,e),j=!0)},o(e){E(m.$$.fragment,e),E(g.$$.fragment,e),E(_.$$.fragment,e),E(y.$$.fragment,e),E($.$$.fragment,e),E(b.$$.fragment,e),j=!1},d(e){e&&(t(O),t(L),t(I),t(G),t(h),t(V),t(f),t(k),t(z),t(c),t(N),t(U),t(A)),t(a),P(m,e),P(g,e),P(_),P(y),P($),P(b,e)}}}const ue='{"title":"ConsistencyDecoderScheduler","local":"consistencydecoderscheduler","sections":[{"title":"ConsistencyDecoderScheduler","local":"diffusers.schedulers.ConsistencyDecoderScheduler","sections":[],"depth":2}],"depth":1}';function pe(X){return oe(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class _e extends ce{constructor(a){super(),de(this,a,pe,le,ne,{})}}export{_e as component};

Xet Storage Details

Size:
7.67 kB
·
Xet hash:
cdabe8acd9c39aa64b2095b37a15561888630e2fa1e6ca0e3cd349fa09bc5f20

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.