Buckets:

rtrm's picture
download
raw
6 kB
// Auto-generated, minified SvelteKit page bundle for the diffusers
// "SD3Transformer2D" API docs page (built from
// docs/source/en/api/loaders/transformer_sd3.md). Do not edit by hand;
// regenerate via the doc-builder instead.
//
// Structure (standard Svelte compiler output):
//   le(I) — slot fragment for the Tip component (LoRA loading-guide note),
//            with c/l/m/p/d lifecycle methods (create/claim/mount/update/destroy).
//   me(I) — main page fragment: heading (H), Tip, Docstring (Z) entries for
//            SD3Transformer2DLoadersMixin and _load_ip_adapter_weights, and
//            the EditOnGithub (de) footer.
//   fe    — serialized page metadata written into <meta name="hf:doc:metadata">.
//   pe(I) — instance script: reads the "fw" query param on mount.
//   $e    — the exported SvelteKit page component wiring pe/me together.
//
// NOTE(review): the double-quoted string `name:"class ` at the end of the first
// line appears to be split across a raw line break by the page scrape — that
// would be a syntax error in real JS, so the deployed asset is presumably a
// single line. Verify against the original bundle before reusing this text.
import{s as te,o as re,n as se}from"../chunks/scheduler.8c3d61f6.js";import{S as ae,i as oe,g as l,s as o,r as M,A as ne,h as m,f as r,c as n,j as G,u as S,x as E,k as B,y as w,a,v as y,d as P,t as C,w as H}from"../chunks/index.da70eac4.js";import{T as ie}from"../chunks/Tip.1d9b8c37.js";import{D as Z}from"../chunks/Docstring.6b390b9a.js";import{H as ee,E as de}from"../chunks/EditOnGithub.1e64e623.js";function le(I){let s,u='To learn more about how to load LoRA weights, see the <a href="../../using-diffusers/loading_adapters#lora">LoRA</a> loading guide.';return{c(){s=l("p"),s.innerHTML=u},l(i){s=m(i,"P",{"data-svelte-h":!0}),E(s)!=="svelte-1fw6lx1"&&(s.innerHTML=u)},m(i,v){a(i,s,v)},p:se,d(i){i&&r(s)}}}function me(I){let s,u,i,v,g,R,h,Q='This class is useful when <em>only</em> loading weights into a <a href="/docs/diffusers/pr_10567/en/api/models/sd3_transformer2d#diffusers.SD3Transformer2DModel">SD3Transformer2DModel</a>. If you need to load weights into the text encoder or a text encoder and SD3Transformer2DModel, check <a href="lora#diffusers.loaders.SD3LoraLoaderMixin"><code>SD3LoraLoaderMixin</code></a> class instead.',j,_,W="The <code>SD3Transformer2DLoadersMixin</code> class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.",k,p,z,$,O,d,D,J,L,X="Load IP-Adapters and LoRA layers into a <code>[SD3Transformer2DModel]</code>.",K,c,T,N,b,Y="Sets IP-Adapter attention processors, image projection, and loads state_dict.",U,x,V,A,q;return g=new ee({props:{title:"SD3Transformer2D",local:"sd3transformer2d",headingTag:"h1"}}),p=new ie({props:{$$slots:{default:[le]},$$scope:{ctx:I}}}),$=new ee({props:{title:"SD3Transformer2DLoadersMixin",local:"diffusers.loaders.SD3Transformer2DLoadersMixin",headingTag:"h2"}}),D=new Z({props:{name:"class 
diffusers.loaders.SD3Transformer2DLoadersMixin",anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/loaders/transformer_sd3.py#L21"}}),T=new Z({props:{name:"_load_ip_adapter_weights",anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights",parameters:[{name:"state_dict",val:": typing.Dict"},{name:"low_cpu_mem_usage",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights.state_dict",description:`<strong>state_dict</strong> (<code>Dict</code>) &#x2014;
State dict with keys &#x201C;ip_adapter&#x201D;, which contains parameters for attention processors, and
&#x201C;image_proj&#x201D;, which contains parameters for image projection net.`,name:"state_dict"},{anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version &gt;= 1.9.0 else <code>False</code>) &#x2014;
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
Only supported for PyTorch &gt;= 1.9.0. If you are using an older version of PyTorch, setting this
argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/loaders/transformer_sd3.py#L24"}}),x=new de({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/loaders/transformer_sd3.md"}}),{c(){s=l("meta"),u=o(),i=l("p"),v=o(),M(g.$$.fragment),R=o(),h=l("p"),h.innerHTML=Q,j=o(),_=l("p"),_.innerHTML=W,k=o(),M(p.$$.fragment),z=o(),M($.$$.fragment),O=o(),d=l("div"),M(D.$$.fragment),J=o(),L=l("p"),L.innerHTML=X,K=o(),c=l("div"),M(T.$$.fragment),N=o(),b=l("p"),b.textContent=Y,U=o(),M(x.$$.fragment),V=o(),A=l("p"),this.h()},l(e){const t=ne("svelte-u9bgzb",document.head);s=m(t,"META",{name:!0,content:!0}),t.forEach(r),u=n(e),i=m(e,"P",{}),G(i).forEach(r),v=n(e),S(g.$$.fragment,e),R=n(e),h=m(e,"P",{"data-svelte-h":!0}),E(h)!=="svelte-xcmda6"&&(h.innerHTML=Q),j=n(e),_=m(e,"P",{"data-svelte-h":!0}),E(_)!=="svelte-b199o"&&(_.innerHTML=W),k=n(e),S(p.$$.fragment,e),z=n(e),S($.$$.fragment,e),O=n(e),d=m(e,"DIV",{class:!0});var f=G(d);S(D.$$.fragment,f),J=n(f),L=m(f,"P",{"data-svelte-h":!0}),E(L)!=="svelte-v7p57z"&&(L.innerHTML=X),K=n(f),c=m(f,"DIV",{class:!0});var F=G(c);S(T.$$.fragment,F),N=n(F),b=m(F,"P",{"data-svelte-h":!0}),E(b)!=="svelte-ym8e73"&&(b.textContent=Y),F.forEach(r),f.forEach(r),U=n(e),S(x.$$.fragment,e),V=n(e),A=m(e,"P",{}),G(A).forEach(r),this.h()},h(){B(s,"name","hf:doc:metadata"),B(s,"content",fe),B(c,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),B(d,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){w(document.head,s),a(e,u,t),a(e,i,t),a(e,v,t),y(g,e,t),a(e,R,t),a(e,h,t),a(e,j,t),a(e,_,t),a(e,k,t),y(p,e,t),a(e,z,t),y($,e,t),a(e,O,t),a(e,d,t),y(D,d,null),w(d,J),w(d,L),w(d,K),w(d,c),y(T,c,null),w(c,N),w(c,b),a(e,U,t),y(x,e,t),a(e,V,t),a(e,A,t),q=!0},p(e,[t]){const 
f={};t&2&&(f.$$scope={dirty:t,ctx:e}),p.$set(f)},i(e){q||(P(g.$$.fragment,e),P(p.$$.fragment,e),P($.$$.fragment,e),P(D.$$.fragment,e),P(T.$$.fragment,e),P(x.$$.fragment,e),q=!0)},o(e){C(g.$$.fragment,e),C(p.$$.fragment,e),C($.$$.fragment,e),C(D.$$.fragment,e),C(T.$$.fragment,e),C(x.$$.fragment,e),q=!1},d(e){e&&(r(u),r(i),r(v),r(R),r(h),r(j),r(_),r(k),r(z),r(O),r(d),r(U),r(V),r(A)),r(s),H(g,e),H(p,e),H($,e),H(D),H(T),H(x,e)}}}const fe='{"title":"SD3Transformer2D","local":"sd3transformer2d","sections":[{"title":"SD3Transformer2DLoadersMixin","local":"diffusers.loaders.SD3Transformer2DLoadersMixin","sections":[],"depth":2}],"depth":1}';function pe(I){return re(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class $e extends ae{constructor(s){super(),oe(this,s,pe,me,te,{})}}export{$e as component};

Xet Storage Details

Size:
6 kB
·
Xet hash:
df2ef8649f1b5ec9be9c0e0dac036dc54f51618a0210e91a8987c87ff6f0e2a8

Xet stores files efficiently by splitting them into unique, deduplicated chunks, which accelerates both uploads and downloads. See the Xet documentation for more information.