Buckets:

rtrm's picture
download
raw
200 kB
import{s as ni,n as tn,o as zi,f as ma,i as fa,r as ga,t as va,b as ua,c as ya,u as Ma,g as xa,d as Ia}from"../chunks/scheduler.94020406.js";import{S as oi,i as ii,e as Ie,a as r,f as n,g as b,H as ce,h as w,j as Z,A as me,k as T,l as ei,s as c,r as X,c as m,u as U,y as v,v as S,B as Cn,d as k,p as Jn,t as L,b as Ln,w as P,x as J,z as si,m as Kt,n as en,C as _a,o as ti,D as ha,E as Ta}from"../chunks/index.a08c8d92.js";import{T as Es}from"../chunks/Tip.3b0aeee8.js";import{g as ba}from"../chunks/globals.7f7f1b26.js";import{e as Te}from"../chunks/each.e59479a4.js";import{I as ai,H as ee,E as ka}from"../chunks/EditOnGithub.b1bceb47.js";import{C as Q}from"../chunks/CodeBlock.b23cf525.js";function Rs(p){let t,o,s;return{c(){t=b("div"),o=new ce(!1),this.h()},l(a){t=w(a,"DIV",{class:!0,style:!0,id:!0});var _=Z(t);o=me(_,!1),_.forEach(n),this.h()},h(){o.a=null,T(t,"class",s="absolute text-base py-1.5 p-2 break-words bg-white border-2 border-black dark:border-gray-500 rounded shadow-alternate-xl z-50 "+(p[0].length>1e3?"max-w-lg":"max-w-xs")),ei(t,"top",p[2]+15+"px"),ei(t,"left",p[1]+15+"px"),T(t,"id",p[3])},m(a,_){r(a,t,_),o.m(p[0],t)},p(a,_){_&1&&o.p(a[0]),_&1&&s!==(s="absolute text-base py-1.5 p-2 break-words bg-white border-2 border-black dark:border-gray-500 rounded shadow-alternate-xl z-50 "+(a[0].length>1e3?"max-w-lg":"max-w-xs"))&&T(t,"class",s),_&4&&ei(t,"top",a[2]+15+"px"),_&2&&ei(t,"left",a[1]+15+"px"),_&8&&T(t,"id",a[3])},d(a){a&&n(t)}}}function Ja(p){let t,o=p[0]&&Rs(p);return{c(){o&&o.c(),t=Ie()},l(s){o&&o.l(s),t=Ie()},m(s,a){o&&o.m(s,a),r(s,t,a)},p(s,[a]){s[0]?o?o.p(s,a):(o=Rs(s),o.c(),o.m(t.parentNode,t)):o&&(o.d(1),o=null)},i:tn,o:tn,d(s){s&&n(t),o&&o.d(s)}}}function La(p,t,o){let{txt:s}=t,{x:a}=t,{y:_}=t,{id:l}=t;return p.$$set=d=>{"txt"in d&&o(0,s=d.txt),"x"in d&&o(1,a=d.x),"y"in d&&o(2,_=d.y),"id"in d&&o(3,l=d.id)},[s,a,_,l]}class Ca extends oi{constructor(t){super(),ii(this,t,La,Ja,ni,{txt:0,x:1,y:2,id:3})}}const Vs="docstring-tooltip";function 
wa(p,t){let o;function s(d){l(),o=new Ca({props:{txt:t,x:d.pageX,y:d.pageY,id:Vs},target:document.body})}function a(d){o.$set({x:d.pageX,y:d.pageY})}function _(){o.$destroy()}function l(){var I;const d=document.getElementById(Vs);d&&((I=d.parentNode)==null||I.removeChild(d))}return p.addEventListener("mouseover",s),p.addEventListener("mouseleave",_),p.addEventListener("mousemove",a),{destroy(){p.removeEventListener("mouseover",s),p.removeEventListener("mouseleave",_),p.removeEventListener("mousemove",a)}}}const{window:Xa}=ba;function As(p,t,o){const s=p.slice();return s[26]=t[o].title,s[11]=t[o].parametersDescription,s}function Fs(p,t,o){const s=p.slice();return s[9]=t[o].anchor,s[29]=t[o].description,s}function Ys(p,t,o){const s=p.slice();return s[9]=t[o].anchor,s[29]=t[o].description,s}function Hs(p,t,o){const s=p.slice();return s[10]=t[o].name,s[34]=t[o].val,s}function Qs(p){let t,o,s="<",a,_,l="source",d,I,M=">";return{c(){t=b("a"),o=b("span"),o.textContent=s,a=c(),_=b("span"),_.textContent=l,d=c(),I=b("span"),I.textContent=M,this.h()},l(y){t=w(y,"A",{class:!0,href:!0,target:!0});var f=Z(t);o=w(f,"SPAN",{"data-svelte-h":!0}),J(o)!=="svelte-1kd6by1"&&(o.textContent=s),a=m(f),_=w(f,"SPAN",{class:!0,"data-svelte-h":!0}),J(_)!=="svelte-122apf4"&&(_.textContent=l),d=m(f),I=w(f,"SPAN",{"data-svelte-h":!0}),J(I)!=="svelte-x0xyl0"&&(I.textContent=M),f.forEach(n),this.h()},h(){T(_,"class","hidden md:block mx-0.5 hover:!underline"),T(t,"class","!ml-auto !text-gray-400 !no-underline text-sm flex items-center"),T(t,"href",p[7]),T(t,"target","_blank")},m(y,f){r(y,t,f),v(t,o),v(t,a),v(t,_),v(t,d),v(t,I)},p(y,f){f[0]&128&&T(t,"href",y[7])},d(y){y&&n(t)}}}function qs(p){let t,o,s="(",a,_,l,d=")",I,M=Te(p[1]),y=[];for(let u=0;u<M.length;u+=1)y[u]=Os(Hs(p,M,u));let f=p[4]&&Ks(p);return{c(){t=b("p"),o=b("span"),o.textContent=s,a=c();for(let u=0;u<y.length;u+=1)y[u].c();_=c(),l=b("span"),l.textContent=d,I=c(),f&&f.c(),this.h()},l(u){t=w(u,"P",{class:!0});var 
h=Z(t);o=w(h,"SPAN",{"data-svelte-h":!0}),J(o)!=="svelte-8mvn6a"&&(o.textContent=s),a=m(h);for(let g=0;g<y.length;g+=1)y[g].l(h);_=m(h),l=w(h,"SPAN",{"data-svelte-h":!0}),J(l)!=="svelte-1jq0pl7"&&(l.textContent=d),I=m(h),f&&f.l(h),h.forEach(n),this.h()},h(){T(t,"class","font-mono text-xs md:text-sm !leading-relaxed !my-6")},m(u,h){r(u,t,h),v(t,o),v(t,a);for(let g=0;g<y.length;g+=1)y[g]&&y[g].m(t,null);v(t,_),v(t,l),v(t,I),f&&f.m(t,null)},p(u,h){if(h[0]&147970){M=Te(u[1]);let g;for(g=0;g<M.length;g+=1){const x=Hs(u,M,g);y[g]?y[g].p(x,h):(y[g]=Os(x),y[g].c(),y[g].m(t,_))}for(;g<y.length;g+=1)y[g].d(1);y.length=M.length}u[4]?f?f.p(u,h):(f=Ks(u),f.c(),f.m(t,null)):f&&(f.d(1),f=null)},d(u){u&&n(t),si(y,u),f&&f.d()}}}function Os(p){let t,o,s=p[10]+"",a,_,l=p[34]+"",d,I,M,y,f;function u(){return p[21](p[10])}return{c(){t=b("span"),o=b("span"),a=Kt(s),_=b("span"),d=Kt(l),this.h()},l(h){t=w(h,"SPAN",{class:!0});var g=Z(t);o=w(g,"SPAN",{class:!0});var x=Z(o);a=en(x,s),_=w(x,"SPAN",{class:!0});var j=Z(_);d=en(j,l),j.forEach(n),x.forEach(n),g.forEach(n),this.h()},h(){T(_,"class","opacity-60"),T(o,"class","rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black"),T(t,"class",I="comma "+(p[14][p[10]]?"cursor-pointer":"cursor-default"))},m(h,g){r(h,t,g),v(t,o),v(o,a),v(o,_),v(_,d),y||(f=[ma(M=wa.call(null,t,p[14][p[10]]||"")),Cn(t,"click",_a(ha(u)))],y=!0)},p(h,g){p=h,g[0]&2&&s!==(s=p[10]+"")&&ti(a,s),g[0]&2&&l!==(l=p[34]+"")&&ti(d,l),g[0]&2&&I!==(I="comma "+(p[14][p[10]]?"cursor-pointer":"cursor-default"))&&T(t,"class",I),M&&fa(M.update)&&g[0]&2&&M.update.call(null,p[14][p[10]]||"")},d(h){h&&n(t),y=!1,ga(f)}}}function Ks(p){let t,o="→",s,a,_,l=pa(p[4])+"",d,I,M,y;return{c(){t=b("span"),t.textContent=o,s=c(),a=b("span"),_=new ce(!1),this.h()},l(f){t=w(f,"SPAN",{class:!0,"data-svelte-h":!0}),J(t)!=="svelte-1j6k10o"&&(t.textContent=o),s=m(f),a=w(f,"SPAN",{class:!0});var 
u=Z(a);_=me(u,!1),u.forEach(n),this.h()},h(){T(t,"class","font-bold"),_.a=null,T(a,"class",d="rounded hover:bg-gray-400 "+(p[3]?"cursor-pointer":"cursor-default"))},m(f,u){r(f,t,u),r(f,s,u),r(f,a,u),_.m(l,a),M||(y=[ma(I=wa.call(null,a,p[3]||"")),Cn(a,"click",_a(ha(p[22])))],M=!0)},p(f,u){u[0]&16&&l!==(l=pa(f[4])+"")&&_.p(l),u[0]&8&&d!==(d="rounded hover:bg-gray-400 "+(f[3]?"cursor-pointer":"cursor-default"))&&T(a,"class",d),I&&fa(I.update)&&u[0]&8&&I.update.call(null,f[3]||"")},d(f){f&&(n(t),n(s),n(a)),M=!1,ga(y)}}}function ea(p){var M;let t,o,s,a=((M=p[11])==null?void 0:M.length)+"",_,l,d,I;return{c(){t=b("div"),o=b("button"),s=Kt("Expand "),_=Kt(a),l=Kt(" parameters"),this.h()},l(y){t=w(y,"DIV",{class:!0});var f=Z(t);o=w(f,"BUTTON",{class:!0});var u=Z(o);s=en(u,"Expand "),_=en(u,a),l=en(u," parameters"),u.forEach(n),f.forEach(n),this.h()},h(){T(o,"class","absolute leading-tight px-3 py-1.5 dark:bg-gray-900 bg-black text-gray-200 hover:text-white rounded-xl bottom-12 ring-offset-2 hover:ring-black hover:ring-2"),T(t,"class","absolute inset-0 bg-gradient-to-t from-white to-white/0 dark:from-gray-950 dark:to-gray-950/0 z-10 flex justify-center")},m(y,f){r(y,t,f),v(t,o),v(o,s),v(o,_),v(o,l),d||(I=Cn(o,"click",p[23]),d=!0)},p(y,f){var u;f[0]&2048&&a!==(a=((u=y[11])==null?void 0:u.length)+"")&&ti(_,a)},d(y){y&&n(t),d=!1,I()}}}function ta(p){let t,o='Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span>',s,a,_,l=Te(p[11]),d=[];for(let M=0;M<l.length;M+=1)d[M]=na(Ys(p,l,M));const I=M=>L(d[M],1,1,()=>{d[M]=null});return{c(){t=b("p"),t.innerHTML=o,s=c(),a=b("ul");for(let M=0;M<d.length;M+=1)d[M].c();this.h()},l(M){t=w(M,"P",{class:!0,"data-svelte-h":!0}),J(t)!=="svelte-lt6pb6"&&(t.innerHTML=o),s=m(M),a=w(M,"UL",{class:!0});var y=Z(a);for(let f=0;f<d.length;f+=1)d[f].l(y);y.forEach(n),this.h()},h(){T(t,"class","flex items-center font-semibold !mt-2 !mb-2 text-gray-800"),T(a,"class","px-2")},m(M,y){r(M,t,y),r(M,s,y),r(M,a,y);for(let 
f=0;f<d.length;f+=1)d[f]&&d[f].m(a,null);_=!0},p(M,y){if(y[0]&2049){l=Te(M[11]);let f;for(f=0;f<l.length;f+=1){const u=Ys(M,l,f);d[f]?(d[f].p(u,y),k(d[f],1)):(d[f]=na(u),d[f].c(),k(d[f],1),d[f].m(a,null))}for(Jn(),f=l.length;f<d.length;f+=1)I(f);Ln()}},i(M){if(!_){for(let y=0;y<l.length;y+=1)k(d[y]);_=!0}},o(M){d=d.filter(Boolean);for(let y=0;y<d.length;y+=1)L(d[y]);_=!1},d(M){M&&(n(t),n(s),n(a)),si(d,M)}}}function na(p){let t,o,s,a,_,l,d,I,M,y,f=p[29]+"",u,h,g;return _=new ai({props:{classNames:"text-smd"}}),{c(){t=b("li"),o=b("span"),s=b("a"),a=b("span"),X(_.$$.fragment),I=c(),M=b("span"),y=new ce(!1),u=c(),this.h()},l(x){t=w(x,"LI",{class:!0});var j=Z(t);o=w(j,"SPAN",{class:!0});var E=Z(o);s=w(E,"A",{id:!0,class:!0,href:!0});var G=Z(s);a=w(G,"SPAN",{});var oe=Z(a);U(_.$$.fragment,oe),oe.forEach(n),G.forEach(n),I=m(E),M=w(E,"SPAN",{});var ie=Z(M);y=me(ie,!1),ie.forEach(n),E.forEach(n),u=m(j),j.forEach(n),this.h()},h(){T(s,"id",l=p[9]),T(s,"class","header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),T(s,"href",d=`&amp;num;${p[9]}`),y.a=null,T(o,"class","group flex space-x-1.5 items-start"),T(t,"class",h="text-base !pl-4 my-3 rounded "+(p[0]===p[9]?ke:""))},m(x,j){r(x,t,j),v(t,o),v(o,s),v(s,a),S(_,a,null),v(o,I),v(o,M),y.m(f,M),v(t,u),g=!0},p(x,j){(!g||j[0]&2048&&l!==(l=x[9]))&&T(s,"id",l),(!g||j[0]&2048&&d!==(d=`&amp;num;${x[9]}`))&&T(s,"href",d),(!g||j[0]&2048)&&f!==(f=x[29]+"")&&y.p(f),(!g||j[0]&2049&&h!==(h="text-base !pl-4 my-3 rounded "+(x[0]===x[9]?ke:"")))&&T(t,"class",h)},i(x){g||(k(_.$$.fragment,x),g=!0)},o(x){L(_.$$.fragment,x),g=!1},d(x){x&&n(t),P(_)}}}function oa(p){let t,o,s=Te(p[2]),a=[];for(let l=0;l<s.length;l+=1)a[l]=sa(As(p,s,l));const _=l=>L(a[l],1,1,()=>{a[l]=null});return{c(){for(let l=0;l<a.length;l+=1)a[l].c();t=Ie()},l(l){for(let d=0;d<a.length;d+=1)a[d].l(l);t=Ie()},m(l,d){for(let 
I=0;I<a.length;I+=1)a[I]&&a[I].m(l,d);r(l,t,d),o=!0},p(l,d){if(d[0]&5){s=Te(l[2]);let I;for(I=0;I<s.length;I+=1){const M=As(l,s,I);a[I]?(a[I].p(M,d),k(a[I],1)):(a[I]=sa(M),a[I].c(),k(a[I],1),a[I].m(t.parentNode,t))}for(Jn(),I=s.length;I<a.length;I+=1)_(I);Ln()}},i(l){if(!o){for(let d=0;d<s.length;d+=1)k(a[d]);o=!0}},o(l){a=a.filter(Boolean);for(let d=0;d<a.length;d+=1)L(a[d]);o=!1},d(l){l&&n(t),si(a,l)}}}function ia(p){let t,o,s,a,_,l,d,I,M,y,f=p[29]+"",u,h;return _=new ai({props:{classNames:"text-smd"}}),{c(){t=b("li"),o=b("span"),s=b("a"),a=b("span"),X(_.$$.fragment),I=c(),M=b("span"),y=new ce(!1),this.h()},l(g){t=w(g,"LI",{class:!0});var x=Z(t);o=w(x,"SPAN",{class:!0});var j=Z(o);s=w(j,"A",{id:!0,class:!0,href:!0});var E=Z(s);a=w(E,"SPAN",{});var G=Z(a);U(_.$$.fragment,G),G.forEach(n),E.forEach(n),I=m(j),M=w(j,"SPAN",{});var oe=Z(M);y=me(oe,!1),oe.forEach(n),j.forEach(n),x.forEach(n),this.h()},h(){T(s,"id",l=p[9]),T(s,"class","header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),T(s,"href",d=`&amp;num;${p[9]}`),y.a=null,T(o,"class","group flex space-x-1.5 items-start"),T(t,"class",u="text-base !pl-4 my-3 rounded "+(p[0]===p[9]?ke:""))},m(g,x){r(g,t,x),v(t,o),v(o,s),v(s,a),S(_,a,null),v(o,I),v(o,M),y.m(f,M),h=!0},p(g,x){(!h||x[0]&4&&l!==(l=g[9]))&&T(s,"id",l),(!h||x[0]&4&&d!==(d=`&amp;num;${g[9]}`))&&T(s,"href",d),(!h||x[0]&4)&&f!==(f=g[29]+"")&&y.p(f),(!h||x[0]&5&&u!==(u="text-base !pl-4 my-3 rounded "+(g[0]===g[9]?ke:"")))&&T(t,"class",u)},i(g){h||(k(_.$$.fragment,g),h=!0)},o(g){L(_.$$.fragment,g),h=!1},d(g){g&&n(t),P(_)}}}function sa(p){let t,o=p[26]+"",s,a,_,l,d,I,M,y=Te(p[11]),f=[];for(let h=0;h<y.length;h+=1)f[h]=ia(Fs(p,y,h));const u=h=>L(f[h],1,1,()=>{f[h]=null});return{c(){t=b("p"),s=Kt(o),a=c(),_=b("span"),l=c(),d=b("ul");for(let h=0;h<f.length;h+=1)f[h].c();I=c(),this.h()},l(h){t=w(h,"P",{class:!0});var 
g=Z(t);s=en(g,o),a=m(g),_=w(g,"SPAN",{class:!0}),Z(_).forEach(n),g.forEach(n),l=m(h),d=w(h,"UL",{class:!0});var x=Z(d);for(let j=0;j<f.length;j+=1)f[j].l(x);I=m(x),x.forEach(n),this.h()},h(){T(_,"class","flex-auto border-t-2 ml-3"),T(t,"class","flex items-center font-semibold"),T(d,"class","px-2")},m(h,g){r(h,t,g),v(t,s),v(t,a),v(t,_),r(h,l,g),r(h,d,g);for(let x=0;x<f.length;x+=1)f[x]&&f[x].m(d,null);v(d,I),M=!0},p(h,g){if((!M||g[0]&4)&&o!==(o=h[26]+"")&&ti(s,o),g[0]&5){y=Te(h[11]);let x;for(x=0;x<y.length;x+=1){const j=Fs(h,y,x);f[x]?(f[x].p(j,g),k(f[x],1)):(f[x]=ia(j),f[x].c(),k(f[x],1),f[x].m(d,I))}for(Jn(),x=y.length;x<f.length;x+=1)u(x);Ln()}},i(h){if(!M){for(let g=0;g<y.length;g+=1)k(f[g]);M=!0}},o(h){f=f.filter(Boolean);for(let g=0;g<f.length;g+=1)L(f[g]);M=!1},d(h){h&&(n(t),n(l),n(d)),si(f,h)}}}function aa(p){let t,o,s,a,_,l,d,I,M,y,f,u=(p[3]||"")+"",h=!!p[4]&&la(p);return{c(){t=b("div"),o=b("p"),s=Kt(p[15]),a=c(),h&&h.c(),_=c(),l=b("span"),M=c(),y=b("p"),f=new ce(!1),this.h()},l(g){t=w(g,"DIV",{id:!0,class:!0});var x=Z(t);o=w(x,"P",{class:!0});var j=Z(o);s=en(j,p[15]),j.forEach(n),a=m(x),h&&h.l(x),_=m(x),l=w(x,"SPAN",{class:!0}),Z(l).forEach(n),x.forEach(n),M=m(g),y=w(g,"P",{class:!0});var E=Z(y);f=me(E,!1),E.forEach(n),this.h()},h(){T(o,"class","text-base"),T(l,"class","flex-auto border-t-2 border-gray-100 dark:border-gray-700"),T(t,"id",d=`${p[9]}.${p[16]}`),T(t,"class",I="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "+(p[0]===p[9]?ke:"")),f.a=null,T(y,"class","text-base")},m(g,x){r(g,t,x),v(t,o),v(o,s),v(t,a),h&&h.m(t,null),v(t,_),v(t,l),r(g,M,x),r(g,y,x),f.m(u,y)},p(g,x){g[4]?h?h.p(g,x):(h=la(g),h.c(),h.m(t,_)):h&&(h.d(1),h=null),x[0]&512&&d!==(d=`${g[9]}.${g[16]}`)&&T(t,"id",d),x[0]&513&&I!==(I="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800 rounded "+(g[0]===g[9]?ke:""))&&T(t,"class",I),x[0]&8&&u!==(u=(g[3]||"")+"")&&f.p(u)},d(g){g&&(n(t),n(M),n(y)),h&&h.d()}}}function la(p){let 
t,o;return{c(){t=new ce(!1),o=Ie(),this.h()},l(s){t=me(s,!1),o=Ie(),this.h()},h(){t.a=o},m(s,a){t.m(p[4],s,a),r(s,o,a)},p(s,a){a[0]&16&&t.p(s[4])},d(s){s&&(n(o),t.d())}}}function ra(p){let t,o,s="Raises",a,_,l,d,I,M,y,f=(p[5]||"")+"",u=!!p[6]&&da(p);return{c(){t=b("div"),o=b("p"),o.textContent=s,a=c(),u&&u.c(),_=c(),l=b("span"),I=c(),M=b("p"),y=new ce(!1),this.h()},l(h){t=w(h,"DIV",{class:!0,id:!0});var g=Z(t);o=w(g,"P",{class:!0,"data-svelte-h":!0}),J(o)!=="svelte-1ak550f"&&(o.textContent=s),a=m(g),u&&u.l(g),_=m(g),l=w(g,"SPAN",{class:!0}),Z(l).forEach(n),g.forEach(n),I=m(h),M=w(h,"P",{class:!0});var x=Z(M);y=me(x,!1),x.forEach(n),this.h()},h(){T(o,"class","text-base"),T(l,"class","flex-auto border-t-2 border-gray-100 dark:border-gray-700"),T(t,"class","flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800"),T(t,"id",d=`${p[9]}.raises`),y.a=null,T(M,"class","text-base")},m(h,g){r(h,t,g),v(t,o),v(t,a),u&&u.m(t,null),v(t,_),v(t,l),r(h,I,g),r(h,M,g),y.m(f,M)},p(h,g){h[6]?u?u.p(h,g):(u=da(h),u.c(),u.m(t,_)):u&&(u.d(1),u=null),g[0]&512&&d!==(d=`${h[9]}.raises`)&&T(t,"id",d),g[0]&32&&f!==(f=(h[5]||"")+"")&&y.p(f)},d(h){h&&(n(t),n(I),n(M)),u&&u.d()}}}function da(p){let t,o;return{c(){t=new ce(!1),o=Ie(),this.h()},l(s){t=me(s,!1),o=Ie(),this.h()},h(){t.a=o},m(s,a){t.m(p[6],s,a),r(s,o,a)},p(s,a){a[0]&64&&t.p(s[6])},d(s){s&&(n(o),t.d())}}}function Ua(p){let t,o,s,a=p[18](p[10])+"",_,l,d,I,M,y,f,u,h,g,x,j,E,G,oe,ie;d=new ai({});let $=p[7]&&Qs(p),R=!p[8]&&qs(p),N=p[13]&&ea(p),z=!!p[11]&&ta(p),W=p[2]&&oa(p),B=!!p[4]&&aa(p),V=!!p[6]&&ra(p);return{c(){t=b("div"),o=b("span"),s=new ce(!1),_=c(),l=b("a"),X(d.$$.fragment),M=c(),$&&$.c(),y=c(),R&&R.c(),f=c(),u=b("div"),N&&N.c(),h=c(),z&&z.c(),g=c(),W&&W.c(),x=c(),B&&B.c(),j=c(),V&&V.c(),this.h()},l(D){t=w(D,"DIV",{});var C=Z(t);o=w(C,"SPAN",{class:!0,id:!0});var A=Z(o);s=me(A,!1),_=m(A),l=w(A,"A",{id:!0,class:!0,href:!0});var 
te=Z(l);U(d.$$.fragment,te),te.forEach(n),M=m(A),$&&$.l(A),A.forEach(n),y=m(C),R&&R.l(C),f=m(C),u=w(C,"DIV",{class:!0});var q=Z(u);N&&N.l(q),h=m(q),z&&z.l(q),g=m(q),W&&W.l(q),x=m(q),B&&B.l(q),j=m(q),V&&V.l(q),q.forEach(n),C.forEach(n),this.h()},h(){s.a=_,T(l,"id",p[9]),T(l,"class","header-link invisible with-hover:group-hover:visible pr-2"),T(l,"href",I="#"+p[9]),T(o,"class","group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5"),T(o,"id",p[9]),T(u,"class",E="!mb-10 relative docstring-details "+(p[13]?"max-h-96 overflow-hidden":""))},m(D,C){r(D,t,C),v(t,o),s.m(a,o),v(o,_),v(o,l),S(d,l,null),v(o,M),$&&$.m(o,null),v(t,y),R&&R.m(t,null),v(t,f),v(t,u),N&&N.m(u,null),v(u,h),z&&z.m(u,null),v(u,g),W&&W.m(u,null),v(u,x),B&&B.m(u,null),v(u,j),V&&V.m(u,null),p[24](u),G=!0,oe||(ie=Cn(Xa,"hashchange",p[19]),oe=!0)},p(D,C){(!G||C[0]&1024)&&a!==(a=D[18](D[10])+"")&&s.p(a),(!G||C[0]&512)&&T(l,"id",D[9]),(!G||C[0]&512&&I!==(I="#"+D[9]))&&T(l,"href",I),D[7]?$?$.p(D,C):($=Qs(D),$.c(),$.m(o,null)):$&&($.d(1),$=null),(!G||C[0]&512)&&T(o,"id",D[9]),D[8]?R&&(R.d(1),R=null):R?R.p(D,C):(R=qs(D),R.c(),R.m(t,f)),D[13]?N?N.p(D,C):(N=ea(D),N.c(),N.m(u,h)):N&&(N.d(1),N=null),D[11]?z?(z.p(D,C),C[0]&2048&&k(z,1)):(z=ta(D),z.c(),k(z,1),z.m(u,g)):z&&(Jn(),L(z,1,1,()=>{z=null}),Ln()),D[2]?W?(W.p(D,C),C[0]&4&&k(W,1)):(W=oa(D),W.c(),k(W,1),W.m(u,x)):W&&(Jn(),L(W,1,1,()=>{W=null}),Ln()),D[4]?B?B.p(D,C):(B=aa(D),B.c(),B.m(u,j)):B&&(B.d(1),B=null),D[6]?V?V.p(D,C):(V=ra(D),V.c(),V.m(u,null)):V&&(V.d(1),V=null),(!G||C[0]&8192&&E!==(E="!mb-10 relative docstring-details "+(D[13]?"max-h-96 overflow-hidden":"")))&&T(u,"class",E)},i(D){G||(k(d.$$.fragment,D),k(z),k(W),G=!0)},o(D){L(d.$$.fragment,D),L(z),L(W),G=!1},d(D){D&&n(t),P(d),$&&$.d(),R&&R.d(),N&&N.d(),z&&z.d(),W&&W.d(),B&&B.d(),V&&V.d(),p[24](null),oe=!1,ie()}}}const ke="bg-yellow-50 dark:bg-[#494a3d]";function pa(p){const t=/\s*<p>(((?!<p>).)*)<\/p>\s*/gms;return 
p.replace(t,(o,s)=>`<span>${s}</span>`)}function Sa(p,t,o){let{anchor:s}=t,{name:a}=t,{parameters:_=[]}=t,{parametersDescription:l}=t,{parameterGroups:d}=t,{returnDescription:I}=t,{returnType:M}=t,{isYield:y=!1}=t,{raiseDescription:f}=t,{raiseType:u}=t,{source:h=void 0}=t,{hashlink:g}=t,{isGetSetDescriptor:x=!1}=t,j,E,G=!1;const oe=(l==null?void 0:l.reduce((C,A)=>{const{name:te,description:q}=A;return{...C,[te]:q}},{}))||{},ie=y?"Yields":"Returns",$=ie.toLowerCase();zi(()=>{const{hash:C}=window.location;o(0,g=C.substring(1));const q=[...j.querySelectorAll('[href^="#"]')].map(de=>de.id).includes(g);o(13,G=!q&&j.clientHeight>500),z()});async function R(C,A){A&&(o(13,G=!1),await va(),window.location.hash=C)}function N(C){if(C.startsWith("class ")){const A=C.substring(6).split("."),te=A.pop();return`<h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">${A.join(".")}.</span><span class="font-semibold">${te}</span></span></h3>`}else return x?`<div class="flex items-center 
rounded-xl py-0.5 break-all bg-gradient-to-r from-green-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-green-700 dark:text-green-300 font-medium px-2"><svg class="fill-current text-2xl text-green-500 inline-block" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle cx="12.5" cy="12.5" r="7.5" fill-opacity="0.2"></circle><path d="M12.8454 17.4994C12.077 17.4994 11.3929 17.3946 10.7931 17.185C10.1933 16.9779 9.68621 16.6731 9.27188 16.2709C8.85756 15.871 8.542 15.382 8.3252 14.8039C8.1084 14.2257 8 13.5681 8 12.831C8 12.1035 8.1084 11.4435 8.3252 10.8509C8.54441 10.2583 8.86358 9.75005 9.28272 9.32608C9.70187 8.89971 10.2138 8.57211 10.8184 8.34326C11.4254 8.11442 12.1168 8 12.8924 8C13.6103 8 14.251 8.10479 14.8147 8.31436C15.3808 8.52393 15.8602 8.82263 16.2528 9.21046C16.6479 9.59588 16.9478 10.0548 17.1525 10.5871C17.3597 11.1171 17.4621 11.7036 17.4596 12.3468C17.4621 12.79 17.4235 13.1971 17.344 13.5681C17.2645 13.9391 17.1393 14.2631 16.9682 14.5401C16.7996 14.8147 16.578 15.0327 16.3034 15.1941C16.0288 15.3531 15.6963 15.4434 15.3061 15.4651C15.0267 15.4868 14.8002 15.4663 14.6268 15.4037C14.4534 15.341 14.3209 15.2483 14.2293 15.1254C14.1402 15.0002 14.0824 14.8544 14.0559 14.6882H14.0125C13.9547 14.8328 13.8415 14.9641 13.6729 15.0821C13.5043 15.1977 13.2983 15.288 13.055 15.3531C12.8141 15.4157 12.5576 15.4386 12.2854 15.4217C12.0011 15.4049 11.7313 15.3386 11.476 15.223C11.2231 15.1074 10.9978 14.94 10.8003 14.7208C10.6052 14.5015 10.451 14.2305 10.3378 13.9078C10.227 13.585 10.1704 13.2116 10.168 12.7876C10.1704 12.3685 10.2294 12.0035 10.345 11.6928C10.4631 11.3821 10.6184 11.1207 10.8112 10.9087C11.0063 10.6967 11.2231 10.5305 11.4616 10.4101C11.7 10.2896 11.9397 10.2125 12.1806 10.1788C12.4528 10.1379 12.7106 10.1379 12.9538 10.1788C13.1971 10.2198 13.4019 10.286 13.5681 
10.3776C13.7367 10.4691 13.8415 10.5679 13.8825 10.6738H13.9331V10.2692H15.064V13.7957C15.0664 13.962 15.1038 14.0908 15.176 14.1824C15.2483 14.2739 15.3459 14.3197 15.4687 14.3197C15.6349 14.3197 15.7734 14.2462 15.8842 14.0993C15.9975 13.9523 16.0818 13.7271 16.1372 13.4236C16.195 13.1201 16.2239 12.7334 16.2239 12.2637C16.2239 11.8108 16.1637 11.4134 16.0432 11.0713C15.9252 10.7268 15.759 10.4342 15.5446 10.1933C15.3326 9.94998 15.0857 9.75246 14.8039 9.6007C14.522 9.44894 14.2161 9.33813 13.8861 9.26827C13.5585 9.19841 13.2212 9.16349 12.8744 9.16349C12.2745 9.16349 11.7506 9.25502 11.3026 9.4381C10.8545 9.61876 10.4811 9.8729 10.1824 10.2005C9.88374 10.5281 9.65971 10.9123 9.51036 11.3532C9.36342 11.7916 9.28875 12.2697 9.28634 12.7876C9.28875 13.3585 9.36824 13.8644 9.52482 14.3052C9.6838 14.7436 9.91746 15.1122 10.2258 15.4109C10.5341 15.7096 10.9147 15.936 11.3676 16.0902C11.8205 16.2444 12.3408 16.3215 12.9286 16.3215C13.2056 16.3215 13.4766 16.301 13.7415 16.26C14.0065 16.2215 14.2462 16.1733 14.4606 16.1155C14.675 16.0601 14.8472 16.0059 14.9773 15.9529L15.335 17.0008C15.1833 17.0875 14.9773 17.1682 14.7171 17.2428C14.4594 17.3199 14.1679 17.3814 13.8427 17.4271C13.5199 17.4753 13.1875 17.4994 12.8454 17.4994ZM12.6792 14.233C12.9731 14.233 13.2068 14.1764 13.3802 14.0631C13.5561 13.9499 13.6813 13.7825 13.756 13.5609C13.8331 13.3369 13.8692 13.061 13.8644 12.7334C13.862 12.4444 13.8247 12.1999 13.7524 11.9999C13.6825 11.7976 13.5609 11.6446 13.3874 11.541C13.2164 11.4351 12.9779 11.3821 12.672 11.3821C12.4046 11.3821 12.177 11.4387 11.9891 11.5519C11.8036 11.6651 11.6615 11.8241 11.5627 12.0288C11.4664 12.2312 11.417 12.4697 11.4146 12.7443C11.417 12.9996 11.4579 13.2417 11.5374 13.4706C11.6169 13.697 11.7482 13.8813 11.9313 14.0234C12.1144 14.1631 12.3637 14.233 12.6792 14.233Z"></path></svg><span class="text-sm text-green-500 mr-1">property</span><span> ${C}</span></div>`:`<h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>${C}</span></h4>`}function z(){var A;const{hash:C}=window.location;if(o(0,g=C.substring(1)),E&&E.classList.remove(...ke.split(" ")),g===s){const te=(A=document.getElementById(g))==null?void 0:A.closest(".docstring");te&&(E=te,E.classList.add(...ke.split(" ")))}}const W=C=>R(`${s}.${C}`,!!oe[C]),B=()=>R(`${s}.${$}`,!!I),V=()=>o(13,G=!1);function D(C){ua[C?"unshift":"push"](()=>{j=C,o(12,j)})}return p.$$set=C=>{"anchor"in C&&o(9,s=C.anchor),"name"in C&&o(10,a=C.name),"parameters"in C&&o(1,_=C.parameters),"parametersDescription"in C&&o(11,l=C.parametersDescription),"parameterGroups"in C&&o(2,d=C.parameterGroups),"returnDescription"in C&&o(3,I=C.returnDescription),"returnType"in C&&o(4,M=C.returnType),"isYield"in 
C&&o(20,y=C.isYield),"raiseDescription"in C&&o(5,f=C.raiseDescription),"raiseType"in C&&o(6,u=C.raiseType),"source"in C&&o(7,h=C.source),"hashlink"in C&&o(0,g=C.hashlink),"isGetSetDescriptor"in C&&o(8,x=C.isGetSetDescriptor)},[g,_,d,I,M,f,u,h,x,s,a,l,j,G,oe,ie,$,R,N,z,y,W,B,V,D]}class se extends oi{constructor(t){super(),ii(this,t,Sa,Ua,ni,{anchor:9,name:10,parameters:1,parametersDescription:11,parameterGroups:2,returnDescription:3,returnType:4,isYield:20,raiseDescription:5,raiseType:6,source:7,hashlink:0,isGetSetDescriptor:8},null,[-1,-1])}}const{window:Pa}=ba;function Za(p){let t,o,s,a,_,l,d,I,M;a=new ai({props:{classNames:"text-smd"}});const y=p[4].default,f=ya(y,p,p[3],null);return{c(){t=b("div"),o=b("a"),s=b("span"),X(a.$$.fragment),l=c(),f&&f.c(),this.h()},l(u){t=w(u,"DIV",{class:!0});var h=Z(t);o=w(h,"A",{id:!0,class:!0,href:!0});var g=Z(o);s=w(g,"SPAN",{});var x=Z(s);U(a.$$.fragment,x),x.forEach(n),g.forEach(n),l=m(h),f&&f.l(h),h.forEach(n),this.h()},h(){T(o,"id",p[0]),T(o,"class","header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),T(o,"href",_=`&amp;num;${p[0]}`),T(t,"class","relative group rounded-md")},m(u,h){r(u,t,h),v(t,o),v(o,s),S(a,s,null),v(t,l),f&&f.m(t,null),p[5](t),d=!0,I||(M=Cn(Pa,"hashchange",p[2]),I=!0)},p(u,[h]){(!d||h&1)&&T(o,"id",u[0]),(!d||h&1&&_!==(_=`&amp;num;${u[0]}`))&&T(o,"href",_),f&&f.p&&(!d||h&8)&&Ma(f,y,u,u[3],d?Ia(y,u[3],h,null):xa(u[3]),null)},i(u){d||(k(a.$$.fragment,u),k(f,u),d=!0)},o(u){L(a.$$.fragment,u),L(f,u),d=!1},d(u){u&&n(t),P(a),f&&f.d(u),p[5](null),I=!1,M()}}}const ca="bg-yellow-50 dark:bg-[#494a3d]";function ja(p,t,o){let{$$slots:s={},$$scope:a}=t,{anchor:_}=t,l;function d(){const{hash:M}=window.location,y=M.substring(1);l&&l.classList.remove(...ca.split(" ")),y===_&&l.classList.add(...ca.split(" "))}zi(()=>{d()});function I(M){ua[M?"unshift":"push"](()=>{l=M,o(1,l)})}return p.$$set=M=>{"anchor"in 
M&&o(0,_=M.anchor),"$$scope"in M&&o(3,a=M.$$scope)},[_,l,d,a,s,I]}class Ni extends oi{constructor(t){super(),ii(this,t,ja,Za,ni,{anchor:0})}}function Da(p){let t,o="앙상블 방식은 사용 가능한 모든 스케줄러에서 잘 작동합니다!";return{c(){t=b("p"),t.textContent=o},l(s){t=w(s,"P",{"data-svelte-h":!0}),J(t)!=="svelte-1xw8dqq"&&(t.textContent=o)},m(s,a){r(s,t,a)},p:tn,d(s){s&&n(t)}}}function Ga(p){let t,o='refiner는 또한 인페인팅 설정에 잘 사용될 수 있습니다. 아래에 보여지듯이 <a href="/docs/diffusers/main/ko/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline">StableDiffusionXLInpaintPipeline</a> 클래스를 사용해서 만들어보세요.';return{c(){t=b("p"),t.innerHTML=o},l(s){t=w(s,"P",{"data-svelte-h":!0}),J(t)!=="svelte-sbxiih"&&(t.innerHTML=o)},m(s,a){r(s,t,a)},p:tn,d(s){s&&n(t)}}}function $a(p){let t,o="Examples:",s,a,_;return a=new Q({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uWExQaXBlbGluZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtYmFzZS0xLjAlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBcGlwZSUyMCUzRCUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMGFuJTIwYXN0cm9uYXV0JTIwcmlkaW5nJTIwYSUyMGhvcnNlJTIwb24lMjBtYXJzJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = StableDiffusionXLPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;a photo of an astronaut riding a horse on mars&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){t=b("p"),t.textContent=o,s=c(),X(a.$$.fragment)},l(l){t=w(l,"P",{"data-svelte-h":!0}),J(t)!=="svelte-kvfsh7"&&(t.textContent=o),s=m(l),U(a.$$.fragment,l)},m(l,d){r(l,t,d),r(l,s,d),S(a,l,d),_=!0},p:tn,i(l){_||(k(a.$$.fragment,l),_=!0)},o(l){L(a.$$.fragment,l),_=!1},d(l){l&&(n(t),n(s)),P(a,l)}}}function Wa(p){let t,o="Examples:",s,a,_;return a=new Q({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uWExJbWcySW1nUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTEltZzJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLXJlZmluZXItMS4wJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGUlMjAlM0QlMjBwaXBlLnRvKCUyMmN1ZGElMjIpJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGcGF0cmlja3ZvbnBsYXRlbiUyRmltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGYWFfeGwlMkYwMDAwMDAwMDkucG5nJTIyJTBBJTBBaW5pdF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UodXJsKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMGFuJTIwYXN0cm9uYXV0JTIwcmlkaW5nJTIwYSUyMGhvcnNlJTIwb24lMjBtYXJzJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLImg2ImgPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-refiner-1.0&quot;</span>, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>url = <span class="hljs-string">&quot;https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>init_image = load_image(url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;a photo of an astronaut riding a horse on mars&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt, image=init_image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){t=b("p"),t.textContent=o,s=c(),X(a.$$.fragment)},l(l){t=w(l,"P",{"data-svelte-h":!0}),J(t)!=="svelte-kvfsh7"&&(t.textContent=o),s=m(l),U(a.$$.fragment,l)},m(l,d){r(l,t,d),r(l,s,d),S(a,l,d),_=!0},p:tn,i(l){_||(k(a.$$.fragment,l),_=!0)},o(l){L(a.$$.fragment,l),_=!1},d(l){l&&(n(t),n(s)),P(a,l)}}}function Na(p){let t,o="Examples:",s,a,_;return a=new Q({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uWExJbnBhaW50UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTElucGFpbnRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLWJhc2UtMS4wJTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBJTIwJTIwJTIwJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMEElMjAlMjAlMjAlMjB1c2Vfc2FmZXRlbnNvcnMlM0RUcnVlJTJDJTBBKSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFpbWdfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW8ucG5nJTIyJTBBbWFza191cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGbGF0ZW50LWRpZmZ1c2lvbiUyRm1haW4lMkZkYXRhJTJGaW5wYWludGluZ19leGFtcGxlcyUyRm92ZXJ0dXJlLWNyZWF0aW9ucy01c0k2ZlFnWUl1b19tYXNrLnBuZyUyMiUwQSUwQWluaXRfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKGltZ191cmwpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKG1hc2tfdXJsKS5jb252ZXJ0KCUyMlJHQiUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwbWFqZXN0aWMlMjB0aWdlciUyMHNpdHRpbmclMjBvbiUyMGElMjBiZW5jaCUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCUyQyUyMHN0cmVuZ3RoJTNEMC44MCUwQSkuaW1hZ2VzJTVCMCU1R
A==",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLInpaintPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>,
<span class="hljs-meta">... </span> torch_dtype=torch.float16,
<span class="hljs-meta">... </span> variant=<span class="hljs-string">&quot;fp16&quot;</span>,
<span class="hljs-meta">... </span> use_safetensors=<span class="hljs-literal">True</span>,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>img_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>init_image = load_image(img_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>mask_image = load_image(mask_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;A majestic tiger sitting on a bench&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(
<span class="hljs-meta">... </span> prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=<span class="hljs-number">50</span>, strength=<span class="hljs-number">0.80</span>
<span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){t=b("p"),t.textContent=o,s=c(),X(a.$$.fragment)},l(l){t=w(l,"P",{"data-svelte-h":!0}),J(t)!=="svelte-kvfsh7"&&(t.textContent=o),s=m(l),U(a.$$.fragment,l)},m(l,d){r(l,t,d),r(l,s,d),S(a,l,d),_=!0},p:tn,i(l){_||(k(a.$$.fragment,l),_=!0)},o(l){L(a.$$.fragment,l),_=!1},d(l){l&&(n(t),n(s)),P(a,l)}}}function za(p){let t,o,s,a,_,l,d,I='Stable Diffusion XL은 Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, Robin Rombach에 의해 <a href="https://arxiv.org/abs/2307.01952" rel="nofollow">SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis</a>에서 제안되었습니다.',M,y,f="논문 초록은 다음을 따릅니다:",u,h,g="<em>text-to-image의 latent diffusion 모델인 SDXL을 소개합니다. 이전 버전의 Stable Diffusion과 비교하면, SDXL은 세 배 더큰 규모의 UNet 백본을 포함합니다: 모델 파라미터의 증가는 많은 attention 블럭을 사용하고 더 큰 cross-attention context를 SDXL의 두 번째 텍스트 인코더에 사용하기 때문입니다. 다중 종횡비에 다수의 새로운 conditioning 방법을 구성했습니다. 또한 후에 수정하는 image-to-image 기술을 사용함으로써 SDXL에 의해 생성된 시각적 품질을 향상하기 위해 정제된 모델을 소개합니다. SDXL은 이전 버전의 Stable Diffusion보다 성능이 향상되었고, 이러한 black-box 최신 이미지 생성자와 경쟁력있는 결과를 달성했습니다.</em>",x,j,E,G,oe="<li>Stable Diffusion XL은 특히 786과 1024사이의 이미지에 잘 작동합니다.</li> <li>Stable Diffusion XL은 아래와 같이 학습된 각 텍스트 인코더에 대해 서로 다른 프롬프트를 전달할 수 있습니다. 
동일한 프롬프트의 다른 부분을 텍스트 인코더에 전달할 수도 있습니다.</li> <li>Stable Diffusion XL 결과 이미지는 아래에 보여지듯이 정제기(refiner)를 사용함으로써 향상될 수 있습니다.</li>",ie,$,R,N,z='<li><em>Text-to-Image (1024x1024 해상도)</em>: <a href="/docs/diffusers/main/ko/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline">StableDiffusionXLPipeline</a>을 사용한 <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-base-1.0</a></li> <li><em>Image-to-Image / 정제기(refiner) (1024x1024 해상도)</em>: <a href="/docs/diffusers/main/ko/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline">StableDiffusionXLImg2ImgPipeline</a>를 사용한 <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-refiner-1.0</a></li>',W,B,V,D,C=`SDXL을 사용하기 전에 <code>transformers</code>, <code>accelerate</code>, <code>safetensors</code> 와 <code>invisible_watermark</code>를 설치하세요.
다음과 같이 라이브러리를 설치할 수 있습니다:`,A,te,q,de,Xn,Je,Bi='Stable Diffusion XL로 이미지를 생성할 때 워터마크가 보이지 않도록 추가하는 것을 권장하는데, 이는 다운스트림(downstream) 어플리케이션에서 기계에 합성되었는지를 식별하는데 도움을 줄 수 있습니다. 그렇게 하려면 <a href="https://pypi.org/project/invisible-watermark/" rel="nofollow">invisible_watermark 라이브러리</a>를 통해 설치해주세요:',Un,Le,Sn,Ce,Ei="<code>invisible-watermark</code> 라이브러리가 설치되면 워터마커가 <strong>기본적으로</strong> 사용될 것입니다.",Pn,Xe,Ri="생성 또는 안전하게 이미지를 배포하기 위해 다른 규정이 있다면, 다음과 같이 워터마커를 비활성화할 수 있습니다:",Zn,Ue,jn,Se,Dn,Pe,Vi="<em>text-to-image</em>를 위해 다음과 같이 SDXL을 사용할 수 있습니다:",Gn,Ze,$n,je,Wn,De,Ai="<em>image-to-image</em>를 위해 다음과 같이 SDXL을 사용할 수 있습니다:",Nn,Ge,zn,$e,Bn,We,Fi="<em>inpainting</em>를 위해 다음과 같이 SDXL을 사용할 수 있습니다:",En,Ne,Rn,ze,Vn,Be,Yi='<a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">base 모델 체크포인트</a>에서, StableDiffusion-XL 또한 고주파 품질을 향상시키는 이미지를 생성하기 위해 낮은 노이즈 단계 이미지를 제거하는데 특화된 <a href="huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0">refiner 체크포인트</a>를 포함하고 있습니다. 이 refiner 체크포인트는 이미지 품질을 향상시키기 위해 base 체크포인트를 실행한 후 “두 번째 단계” 파이프라인에 사용될 수 있습니다.',An,Ee,Hi="refiner를 사용할 때, 쉽게 사용할 수 있습니다",Fn,Re,Qi='<li>1.) base 모델과 refiner을 사용하는데, 이는 <em>Denoisers의 앙상블</em>을 위한 첫 번째 제안된 <a href="https://research.nvidia.com/labs/dir/eDiff-I/" rel="nofollow">eDiff-I</a>를 사용하거나</li> <li>2.) 
base 모델을 거친 후 <a href="https://arxiv.org/abs/2108.01073" rel="nofollow">SDEdit</a> 방법으로 단순하게 refiner를 실행시킬 수 있습니다.</li>',Yn,Ve,qi="<strong>참고</strong>: SD-XL base와 refiner를 앙상블로 사용하는 아이디어는 커뮤니티 기여자들이 처음으로 제안했으며, 이는 다음과 같은 <code>diffusers</code>를 구현하는 데도 도움을 주셨습니다.",Hn,Ae,Oi='<li><a href="https://github.com/SytanSD" rel="nofollow">SytanSD</a></li> <li><a href="https://github.com/bghira" rel="nofollow">bghira</a></li> <li><a href="https://github.com/Birch-san" rel="nofollow">Birch-san</a></li> <li><a href="https://github.com/AmericanPresidentJimmyCarter" rel="nofollow">AmericanPresidentJimmyCarter</a></li>',Qn,Fe,qn,Ye,Ki="base와 refiner 모델을 denoiser의 앙상블로 사용할 때, base 모델은 고주파 diffusion 단계를 위한 전문가의 역할을 해야하고, refiner는 낮은 노이즈 diffusion 단계를 위한 전문가의 역할을 해야 합니다.",On,He,es="2.)에 비해 1.)의 장점은 전체적으로 denoising 단계가 덜 필요하므로 속도가 훨씬 더 빨라집니다. 단점은 base 모델의 결과를 검사할 수 없다는 것입니다. 즉, 여전히 노이즈가 심하게 제거됩니다.",Kn,Qe,ts=`base 모델과 refiner를 denoiser의 앙상블로 사용하기 위해 각각 고노이즈(high-nosise) (<em>즉</em> base 모델)와 저노이즈 (<em>즉</em> refiner 모델)의 노이즈를 제거하는 단계를 거쳐야하는 타임스텝의 기간을 정의해야 합니다.
base 모델의 <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end" rel="nofollow"><code>denoising_end</code></a>와 refiner 모델의 <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start" rel="nofollow"><code>denoising_start</code></a>를 사용해 간격을 정합니다.`,eo,qe,ns=`<code>denoising_end</code>와 <code>denoising_start</code> 모두 0과 1사이의 실수 값으로 전달되어야 합니다.
전달되면 노이즈 제거의 끝과 시작은 모델 스케줄에 의해 정의된 이산적(discrete) 시간 간격의 비율로 정의됩니다.
노이즈 제거 단계의 수는 모델이 학습된 불연속적인 시간 간격과 선언된 fractional cutoff에 의해 결정되므로 ‘강도’ 또한 선언된 경우 이 값이 ‘강도’를 재정의합니다.`,to,Oe,os=`예시를 들어보겠습니다.
우선, 두 개의 파이프라인을 가져옵니다. 텍스트 인코더와 variational autoencoder는 동일하므로 refiner를 위해 다시 불러오지 않아도 됩니다.`,no,Ke,oo,et,is="이제 추론 단계의 수와 고노이즈에서 노이즈를 제거하는 단계(<em>즉</em> base 모델)를 거쳐 실행되는 지점을 정의합니다.",io,tt,so,nt,ss="Stable Diffusion XL base 모델은 타임스텝 0-999에 학습되며 Stable Diffusion XL refiner는 포괄적인 낮은 노이즈 타임스텝인 0-199에 base 모델로 부터 파인튜닝되어, 첫 800 타임스텝 (높은 노이즈)에 base 모델을 사용하고 마지막 200 타입스텝 (낮은 노이즈)에서 refiner가 사용됩니다. 따라서, <code>high_noise_frac</code>는 0.8로 설정하고, 모든 200-999 스텝(노이즈 제거 타임스텝의 첫 80%)은 base 모델에 의해 수행되며 0-199 스텝(노이즈 제거 타임스텝의 마지막 20%)은 refiner 모델에 의해 수행됩니다.",ao,ot,as="기억하세요, 노이즈 제거 절차는 <strong>높은 값</strong>(높은 노이즈) 타임스텝에서 시작되고, <strong>낮은 값</strong> (낮은 노이즈) 타임스텝에서 끝납니다.",lo,it,ls="이제 두 파이프라인을 실행해봅시다. <code>denoising_end</code>과 <code>denoising_start</code>를 같은 값으로 설정하고 <code>num_inference_steps</code>는 상수로 유지합니다. 또한 base 모델의 출력은 잠재 공간에 있어야 한다는 점을 기억하세요:",ro,st,po,at,rs="이미지를 살펴보겠습니다.",co,lt,ds='<thead><tr><th>원래의 이미지</th> <th>Denoiser들의 앙상블</th></tr></thead> <tbody><tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_base.png" alt="lion_base"/></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_refined.png" alt="lion_ref"/></td></tr></tbody>',mo,rt,ps="동일한 40 단계에서 base 모델을 실행한다면, 이미지의 디테일(예: 사자의 눈과 코)이 떨어졌을 것입니다:",fo,fe,go,dt,uo,pt,cs='일반적인 <code>StableDiffusionImg2ImgPipeline</code> 방식에서, 기본 모델에서 생성된 완전히 노이즈가 제거된 이미지는 <a href="huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0">refiner checkpoint</a>를 사용해 더 향상시킬 수 있습니다.',_o,ct,ms="이를 위해, 보통의 “base” text-to-image 파이프라인을 수행 후에 image-to-image 파이프라인으로써 refiner를 실행시킬 수 있습니다. 
base 모델의 출력을 잠재 공간에 남겨둘 수 있습니다.",ho,mt,bo,ft,fs='<thead><tr><th>원래의 이미지</th> <th>정제된 이미지</th></tr></thead> <tbody><tr><td><img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/init_image.png"/></td> <td><img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_image.png"/></td></tr></tbody>',wo,ge,vo,gt,gs="Denoiser 앙상블 설정에서 인페인팅에 refiner를 사용하려면 다음을 수행하면 됩니다:",yo,ut,Mo,_t,us="일반적인 SDE 설정에서 인페인팅에 refiner를 사용하기 위해, <code>denoising_end</code>와 <code>denoising_start</code>를 제거하고 refiner의 추론 단계의 수를 적게 선택하세요.",xo,ht,Io,bt,_s="<code>from_single_file()</code>를 사용함으로써 원래의 파일 형식을 <code>diffusers</code> 형식으로 불러올 수 있습니다:",To,wt,ko,vt,Jo,yt,hs="out-of-memory 에러가 난다면, <code>StableDiffusionXLPipeline.enable_model_cpu_offload()</code>을 사용하는 것을 권장합니다.",Lo,Mt,Co,xt,bs="그리고",Xo,It,Uo,Tt,So,kt,ws="<code>torch.compile</code>를 사용함으로써 추론 속도를 올릴 수 있습니다. 이는 <strong>ca.</strong> 20% 속도 향상이 됩니다.",Po,Jt,Zo,Lt,jo,Ct,vs="<strong>참고</strong> Stable Diffusion XL을 <code>torch</code>가 2.0 버전 미만에서 실행시키고 싶을 때, xformers 어텐션을 사용해주세요:",Do,Xt,Go,Ut,$o,St,Wo,F,Pt,li,nn,ys="Pipeline for text-to-image generation using Stable Diffusion XL.",ri,on,Ms=`This model inherits from <code>DiffusionPipeline</code>. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)`,di,sn,xs="The pipeline also inherits the following loading methods:",pi,an,Is="<li><code>load_textual_inversion()</code> for loading textual inversion embeddings</li> <li><code>from_single_file()</code> for loading <code>.ckpt</code> files</li> <li><code>load_lora_weights()</code> for loading LoRA weights</li> <li><code>save_lora_weights()</code> for saving LoRA weights</li> <li><code>load_ip_adapter()</code> for loading IP Adapters</li>",ci,ae,Zt,mi,ln,Ts="Function invoked when calling the pipeline for generation.",fi,ue,gi,_e,jt,ui,rn,ks="Encodes the prompt into text encoder hidden states.",_i,he,Dt,hi,dn,Js='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',No,Gt,zo,Y,$t,bi,pn,Ls="Pipeline for text-to-image generation using Stable Diffusion XL.",wi,cn,Cs=`This model inherits from <code>DiffusionPipeline</code>. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)`,vi,mn,Xs="The pipeline also inherits the following loading methods:",yi,fn,Us="<li><code>load_textual_inversion()</code> for loading textual inversion embeddings</li> <li><code>from_single_file()</code> for loading <code>.ckpt</code> files</li> <li><code>load_lora_weights()</code> for loading LoRA weights</li> <li><code>save_lora_weights()</code> for saving LoRA weights</li> <li><code>load_ip_adapter()</code> for loading IP Adapters</li>",Mi,le,Wt,xi,gn,Ss="Function invoked when calling the pipeline for generation.",Ii,be,Ti,we,Nt,ki,un,Ps="Encodes the prompt into text encoder hidden states.",Ji,ve,zt,Li,_n,Zs='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',Bo,Bt,Eo,H,Et,Ci,hn,js="Pipeline for text-to-image generation using Stable Diffusion XL.",Xi,bn,Ds=`This model inherits from <code>DiffusionPipeline</code>. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)`,Ui,wn,Gs="The pipeline also inherits the following loading methods:",Si,vn,$s="<li><code>load_textual_inversion()</code> for loading textual inversion embeddings</li> <li><code>from_single_file()</code> for loading <code>.ckpt</code> files</li> <li><code>load_lora_weights()</code> for loading LoRA weights</li> <li><code>save_lora_weights()</code> for saving LoRA weights</li> <li><code>load_ip_adapter()</code> for loading IP Adapters</li>",Pi,re,Rt,Zi,yn,Ws="Function invoked when calling the pipeline for generation.",ji,ye,Di,Me,Vt,Gi,Mn,Ns="Encodes the prompt into text encoder hidden states.",$i,xe,At,Wi,xn,zs='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',Ro,Ft,Vo,Yt,Bs='Stable Diffusion XL는 두 개의 텍스트 인코더에 학습되었습니다. 기본 동작은 각 프롬프트에 동일한 프롬프트를 전달하는 것입니다. 그러나 <a href="https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201" rel="nofollow">일부 사용자</a>가 품질을 향상시킬 수 있다고 지적한 것처럼 텍스트 인코더마다 다른 프롬프트를 전달할 수 있습니다. 그렇게 하려면, <code>prompt_2</code>와 <code>negative_prompt_2</code>를 <code>prompt</code>와 <code>negative_prompt</code>에 전달해야 합니다. 그렇게 함으로써, 원래의 프롬프트들(<code>prompt</code>)과 부정 프롬프트들(<code>negative_prompt</code>)를 <code>텍스트 인코더</code>에 전달할 것입니다.(공식 SDXL 0.9/1.0의 <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">OpenAI CLIP-ViT/L-14</a>에서 볼 수 있습니다.) 
그리고 <code>prompt_2</code>와 <code>negative_prompt_2</code>는 <code>text_encoder_2</code>에 전달됩니다.(공식 SDXL 0.9/1.0의 <a href="https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" rel="nofollow">OpenCLIP-ViT/bigG-14</a>에서 볼 수 있습니다.)',Ao,Ht,Fo,Qt,Yo,kn,Ho;return _=new ee({props:{title:"Stable diffusion XL",local:"stable-diffusion-xl",headingTag:"h1"}}),j=new ee({props:{title:"팁",local:"팁",headingTag:"h2"}}),$=new ee({props:{title:"이용가능한 체크포인트:",local:"이용가능한-체크포인트",headingTag:"h3"}}),B=new ee({props:{title:"사용 예시",local:"사용-예시",headingTag:"h2"}}),te=new Q({props:{code:"cGlwJTIwaW5zdGFsbCUyMHRyYW5zZm9ybWVycyUwQXBpcCUyMGluc3RhbGwlMjBhY2NlbGVyYXRlJTBBcGlwJTIwaW5zdGFsbCUyMHNhZmV0ZW5zb3JzJTBBcGlwJTIwaW5zdGFsbCUyMGludmlzaWJsZS13YXRlcm1hcmslM0UlM0QwLjIuMA==",highlighted:`pip install transformers
pip install accelerate
pip install safetensors
pip install invisible-watermark&gt;=0.2.0`,wrap:!1}}),de=new ee({props:{title:"워터마커",local:"워터마커",headingTag:"h3"}}),Le=new Q({props:{code:"cGlwJTIwaW5zdGFsbCUyMGludmlzaWJsZS13YXRlcm1hcmslM0UlM0QwLjIuMA==",highlighted:"pip install invisible-watermark&gt;=0.2.0",wrap:!1}}),Ue=new Q({props:{code:"cGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblhMUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKC4uLiUyQyUyMGFkZF93YXRlcm1hcmtlciUzREZhbHNlKQ==",highlighted:'pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=<span class="hljs-literal">False</span>)',wrap:!1}}),Se=new ee({props:{title:"Text-to-Image",local:"text-to-image",headingTag:"h3"}}),Ze=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblhMUGlwZWxpbmUlMEFpbXBvcnQlMjB0b3JjaCUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtYmFzZS0xLjAlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMEEpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkFzdHJvbmF1dCUyMGluJTIwYSUyMGp1bmdsZSUyQyUyMGNvbGQlMjBjb2xvciUyMHBhbGV0dGUlMkMlMjBtdXRlZCUyMGNvbG9ycyUyQyUyMGRldGFpbGVkJTJDJTIwOGslMjIlMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0JTNEcHJvbXB0KS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLPipeline
<span class="hljs-keyword">import</span> torch
pipe = StableDiffusionXLPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
prompt = <span class="hljs-string">&quot;Astronaut in a jungle, cold color palette, muted colors, detailed, 8k&quot;</span>
image = pipe(prompt=prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),je=new ee({props:{title:"Image-to-image",local:"image-to-image",headingTag:"h3"}}),Ge=new Q({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uWExJbWcySW1nUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTEltZzJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLXJlZmluZXItMS4wJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB1c2Vfc2FmZXRlbnNvcnMlM0RUcnVlJTBBKSUwQXBpcGUlMjAlM0QlMjBwaXBlLnRvKCUyMmN1ZGElMjIpJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGcGF0cmlja3ZvbnBsYXRlbiUyRmltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGYWFfeGwlMkYwMDAwMDAwMDkucG5nJTIyJTBBJTBBaW5pdF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UodXJsKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMGFuJTIwYXN0cm9uYXV0JTIwcmlkaW5nJTIwYSUyMGhvcnNlJTIwb24lMjBtYXJzJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLImg2ImgPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-refiner-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
url = <span class="hljs-string">&quot;https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png&quot;</span>
init_image = load_image(url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
prompt = <span class="hljs-string">&quot;a photo of an astronaut riding a horse on mars&quot;</span>
image = pipe(prompt, image=init_image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),$e=new ee({props:{title:"인페인팅",local:"인페인팅",headingTag:"h3"}}),Ne=new Q({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uWExJbnBhaW50UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTElucGFpbnRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLWJhc2UtMS4wJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB1c2Vfc2FmZXRlbnNvcnMlM0RUcnVlJTBBKSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFpbWdfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW8ucG5nJTIyJTBBbWFza191cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGbGF0ZW50LWRpZmZ1c2lvbiUyRm1haW4lMkZkYXRhJTJGaW5wYWludGluZ19leGFtcGxlcyUyRm92ZXJ0dXJlLWNyZWF0aW9ucy01c0k2ZlFnWUl1b19tYXNrLnBuZyUyMiUwQSUwQWluaXRfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKGltZ191cmwpLmNvbnZlcnQoJTIyUkdCJTIyKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKG1hc2tfdXJsKS5jb252ZXJ0KCUyMlJHQiUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwbWFqZXN0aWMlMjB0aWdlciUyMHNpdHRpbmclMjBvbiUyMGElMjBiZW5jaCUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlM0Rwcm9tcHQlMkMlMjBpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCUyQyUyMHN0cmVuZ3RoJTNEMC44MCkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLInpaintPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
img_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png&quot;</span>
mask_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png&quot;</span>
init_image = load_image(img_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
mask_image = load_image(mask_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
prompt = <span class="hljs-string">&quot;A majestic tiger sitting on a bench&quot;</span>
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=<span class="hljs-number">50</span>, strength=<span class="hljs-number">0.80</span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),ze=new ee({props:{title:"이미지 결과물을 정제하기",local:"이미지-결과물을-정제하기",headingTag:"h3"}}),Fe=new ee({props:{title:"1.) Denoisers의 앙상블",local:"1-denoisers의-앙상블",headingTag:"h4"}}),Ke=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFiYXNlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi14bC1iYXNlLTEuMCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdXNlX3NhZmV0ZW5zb3JzJTNEVHJ1ZSUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBcmVmaW5lciUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtcmVmaW5lci0xLjAlMjIlMkMlMEElMjAlMjAlMjAlMjB0ZXh0X2VuY29kZXJfMiUzRGJhc2UudGV4dF9lbmNvZGVyXzIlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0RiYXNlLnZhZSUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMkMlMEElMjAlMjAlMjAlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUwQSklMEFyZWZpbmVyLnRvKCUyMmN1ZGElMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
<span class="hljs-keyword">import</span> torch
base = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
refiner = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-refiner-1.0&quot;</span>,
text_encoder_2=base.text_encoder_2,
vae=base.vae,
torch_dtype=torch.float16,
use_safetensors=<span class="hljs-literal">True</span>,
variant=<span class="hljs-string">&quot;fp16&quot;</span>,
)
refiner.to(<span class="hljs-string">&quot;cuda&quot;</span>)`,wrap:!1}}),tt=new Q({props:{code:"bl9zdGVwcyUyMCUzRCUyMDQwJTBBaGlnaF9ub2lzZV9mcmFjJTIwJTNEJTIwMC44",highlighted:`n_steps = <span class="hljs-number">40</span>
high_noise_frac = <span class="hljs-number">0.8</span>`,wrap:!1}}),st=new Q({props:{code:"cHJvbXB0JTIwJTNEJTIwJTIyQSUyMG1hamVzdGljJTIwbGlvbiUyMGp1bXBpbmclMjBmcm9tJTIwYSUyMGJpZyUyMHN0b25lJTIwYXQlMjBuaWdodCUyMiUwQSUwQWltYWdlJTIwJTNEJTIwYmFzZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEbl9zdGVwcyUyQyUwQSUyMCUyMCUyMCUyMGRlbm9pc2luZ19lbmQlM0RoaWdoX25vaXNlX2ZyYWMlMkMlMEElMjAlMjAlMjAlMjBvdXRwdXRfdHlwZSUzRCUyMmxhdGVudCUyMiUyQyUwQSkuaW1hZ2VzJTBBaW1hZ2UlMjAlM0QlMjByZWZpbmVyKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0RuX3N0ZXBzJTJDJTBBJTIwJTIwJTIwJTIwZGVub2lzaW5nX3N0YXJ0JTNEaGlnaF9ub2lzZV9mcmFjJTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSkuaW1hZ2VzJTVCMCU1RA==",highlighted:`prompt = <span class="hljs-string">&quot;A majestic lion jumping from a big stone at night&quot;</span>
image = base(
prompt=prompt,
num_inference_steps=n_steps,
denoising_end=high_noise_frac,
output_type=<span class="hljs-string">&quot;latent&quot;</span>,
).images
image = refiner(
prompt=prompt,
num_inference_steps=n_steps,
denoising_start=high_noise_frac,
image=image,
).images[<span class="hljs-number">0</span>]`,wrap:!1}}),fe=new Es({props:{$$slots:{default:[Da]},$$scope:{ctx:p}}}),dt=new ee({props:{title:"2.) 노이즈가 완전히 제거된 기본 이미지에서 이미지 출력을 정제하기",local:"2-노이즈가-완전히-제거된-기본-이미지에서-이미지-출력을-정제하기",headingTag:"h4"}}),mt=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi14bC1iYXNlLTEuMCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdXNlX3NhZmV0ZW5zb3JzJTNEVHJ1ZSUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBcmVmaW5lciUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtcmVmaW5lci0xLjAlMjIlMkMlMEElMjAlMjAlMjAlMjB0ZXh0X2VuY29kZXJfMiUzRHBpcGUudGV4dF9lbmNvZGVyXzIlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0RwaXBlLnZhZSUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMkMlMEElMjAlMjAlMjAlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUwQSklMEFyZWZpbmVyLnRvKCUyMmN1ZGElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQXN0cm9uYXV0JTIwaW4lMjBhJTIwanVuZ2xlJTJDJTIwY29sZCUyMGNvbG9yJTIwcGFsZXR0ZSUyQyUyMG11dGVkJTIwY29sb3JzJTJDJTIwZGV0YWlsZWQlMkMlMjA4ayUyMiUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlM0Rwcm9tcHQlMkMlMjBvdXRwdXRfdHlwZSUzRCUyMmxhdGVudCUyMiUyMGlmJTIwdXNlX3JlZmluZXIlMjBlbHNlJTIwJTIycGlsJTIyKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2UlMjAlM0QlMjByZWZpbmVyKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEaW1hZ2UlNUJOb25lJTJDJTIwJTNBJTVEKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
<span class="hljs-keyword">import</span> torch
pipe = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
refiner = DiffusionPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-refiner-1.0&quot;</span>,
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=<span class="hljs-literal">True</span>,
variant=<span class="hljs-string">&quot;fp16&quot;</span>,
)
refiner.to(<span class="hljs-string">&quot;cuda&quot;</span>)
prompt = <span class="hljs-string">&quot;Astronaut in a jungle, cold color palette, muted colors, detailed, 8k&quot;</span>
image = pipe(prompt=prompt, output_type=<span class="hljs-string">&quot;latent&quot;</span> <span class="hljs-keyword">if</span> use_refiner <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;pil&quot;</span>).images[<span class="hljs-number">0</span>]
image = refiner(prompt=prompt, image=image[<span class="hljs-literal">None</span>, :]).images[<span class="hljs-number">0</span>]`,wrap:!1}}),ge=new Es({props:{$$slots:{default:[Ga]},$$scope:{ctx:p}}}),ut=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblhMSW5wYWludFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uWExJbnBhaW50UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi14bC1iYXNlLTEuMCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTIwdXNlX3NhZmV0ZW5zb3JzJTNEVHJ1ZSUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBcmVmaW5lciUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblhMSW5wYWludFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtcmVmaW5lci0xLjAlMjIlMkMlMEElMjAlMjAlMjAlMjB0ZXh0X2VuY29kZXJfMiUzRHBpcGUudGV4dF9lbmNvZGVyXzIlMkMlMEElMjAlMjAlMjAlMjB2YWUlM0RwaXBlLnZhZSUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMkMlMEElMjAlMjAlMjAlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUwQSklMEFyZWZpbmVyLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZsYXRlbnQtZGlmZnVzaW9uJTJGbWFpbiUyRmRhdGElMkZpbnBhaW50aW5nX2V4YW1wbGVzJTJGb3ZlcnR1cmUtY3JlYXRpb25zLTVzSTZmUWdZSXVvLnBuZyUyMiUwQW1hc2tfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW9fbWFzay5wbmclMjIlMEElMEFpbml0X2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZShpbWdfdXJsKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFtYXNrX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZShtYXNrX3VybCkuY29udmVydCglMjJSR0IlMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMG1hamVzdGljJTIwdGlnZXIlMjBzaXR0aW5nJTIwb24lMjBhJTIwYmVuY2glMjIlMEFudW1faW5mZXJlbmNlX3N0ZXBzJTIwJTNEJTIwNzUlMEFoaWdoX25vaXNlX2ZyYWMlMjAlM0QlMjAwLjc
lMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0Rpbml0X2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEbnVtX2luZmVyZW5jZV9zdGVwcyUyQyUwQSUyMCUyMCUyMCUyMGRlbm9pc2luZ19zdGFydCUzRGhpZ2hfbm9pc2VfZnJhYyUyQyUwQSUyMCUyMCUyMCUyMG91dHB1dF90eXBlJTNEJTIybGF0ZW50JTIyJTJDJTBBKS5pbWFnZXMlMEFpbWFnZSUyMCUzRCUyMHJlZmluZXIoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRG51bV9pbmZlcmVuY2Vfc3RlcHMlMkMlMEElMjAlMjAlMjAlMjBkZW5vaXNpbmdfc3RhcnQlM0RoaWdoX25vaXNlX2ZyYWMlMkMlMEEpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLInpaintPipeline
<span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-1.0&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-refiner-1.0&quot;</span>,
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=<span class="hljs-literal">True</span>,
variant=<span class="hljs-string">&quot;fp16&quot;</span>,
)
refiner.to(<span class="hljs-string">&quot;cuda&quot;</span>)
img_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png&quot;</span>
mask_url = <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png&quot;</span>
init_image = load_image(img_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
mask_image = load_image(mask_url).convert(<span class="hljs-string">&quot;RGB&quot;</span>)
prompt = <span class="hljs-string">&quot;A majestic tiger sitting on a bench&quot;</span>
num_inference_steps = <span class="hljs-number">75</span>
high_noise_frac = <span class="hljs-number">0.7</span>
image = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
output_type=<span class="hljs-string">&quot;latent&quot;</span>,
).images
image = refiner(
prompt=prompt,
image=image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
).images[<span class="hljs-number">0</span>]`,wrap:!1}}),ht=new ee({props:{title:"단독 체크포인트 파일 / 원래의 파일 형식으로 불러오기",local:"단독-체크포인트-파일--원래의-파일-형식으로-불러오기",headingTag:"h3"}}),wt=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblhMUGlwZWxpbmUlMkMlMjBTdGFibGVEaWZmdXNpb25YTEltZzJJbWdQaXBlbGluZSUwQWltcG9ydCUyMHRvcmNoJTBBJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblhMUGlwZWxpbmUuZnJvbV9zaW5nbGVfZmlsZSglMEElMjAlMjAlMjAlMjAlMjIuJTJGc2RfeGxfYmFzZV8xLjAuc2FmZXRlbnNvcnMlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQXJlZmluZXIlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTEltZzJJbWdQaXBlbGluZS5mcm9tX3NpbmdsZV9maWxlKCUwQSUyMCUyMCUyMCUyMCUyMi4lMkZzZF94bF9yZWZpbmVyXzEuMC5zYWZldGVuc29ycyUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFyZWZpbmVyLnRvKCUyMmN1ZGElMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
<span class="hljs-keyword">import</span> torch
pipe = StableDiffusionXLPipeline.from_single_file(
<span class="hljs-string">&quot;./sd_xl_base_1.0.safetensors&quot;</span>, torch_dtype=torch.float16
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
<span class="hljs-string">&quot;./sd_xl_refiner_1.0.safetensors&quot;</span>, torch_dtype=torch.float16
)
refiner.to(<span class="hljs-string">&quot;cuda&quot;</span>)`,wrap:!1}}),vt=new ee({props:{title:"모델 offloading을 통해 메모리 최적화하기",local:"모델-offloading을-통해-메모리-최적화하기",headingTag:"h3"}}),Mt=new Q({props:{code:"LSUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEElMkIlMjBwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgp",highlighted:`<span class="hljs-deletion">- pipe.to(&quot;cuda&quot;)</span>
<span class="hljs-addition">+ pipe.enable_model_cpu_offload()</span>`,wrap:!1}}),It=new Q({props:{code:"LSUyMHJlZmluZXIudG8oJTIyY3VkYSUyMiklMEElMkIlMjByZWZpbmVyLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgp",highlighted:`<span class="hljs-deletion">- refiner.to(&quot;cuda&quot;)</span>
<span class="hljs-addition">+ refiner.enable_model_cpu_offload()</span>`,wrap:!1}}),Tt=new ee({props:{title:"torch.compile 로 추론 속도를 올리기",local:"torchcompile-로-추론-속도를-올리기",headingTag:"h3"}}),Jt=new Q({props:{code:"JTJCJTIwcGlwZS51bmV0JTIwJTNEJTIwdG9yY2guY29tcGlsZShwaXBlLnVuZXQlMkMlMjBtb2RlJTNEJTIycmVkdWNlLW92ZXJoZWFkJTIyJTJDJTIwZnVsbGdyYXBoJTNEVHJ1ZSklMEElMkIlMjByZWZpbmVyLnVuZXQlMjAlM0QlMjB0b3JjaC5jb21waWxlKHJlZmluZXIudW5ldCUyQyUyMG1vZGUlM0QlMjJyZWR1Y2Utb3ZlcmhlYWQlMjIlMkMlMjBmdWxsZ3JhcGglM0RUcnVlKQ==",highlighted:`<span class="hljs-addition">+ pipe.unet = torch.compile(pipe.unet, mode=&quot;reduce-overhead&quot;, fullgraph=True)</span>
<span class="hljs-addition">+ refiner.unet = torch.compile(refiner.unet, mode=&quot;reduce-overhead&quot;, fullgraph=True)</span>`,wrap:!1}}),Lt=new ee({props:{title:"torch < 2.0 일 때 실행하기",local:"torch-lt-20-일-때-실행하기",headingTag:"h3"}}),Xt=new Q({props:{code:"cGlwJTIwaW5zdGFsbCUyMHhmb3JtZXJz",highlighted:"pip install xformers",wrap:!1}}),Ut=new Q({props:{code:"JTJCcGlwZS5lbmFibGVfeGZvcm1lcnNfbWVtb3J5X2VmZmljaWVudF9hdHRlbnRpb24oKSUwQSUyQnJlZmluZXIuZW5hYmxlX3hmb3JtZXJzX21lbW9yeV9lZmZpY2llbnRfYXR0ZW50aW9uKCk=",highlighted:`<span class="hljs-addition">+pipe.enable_xformers_memory_efficient_attention()</span>
<span class="hljs-addition">+refiner.enable_xformers_memory_efficient_attention()</span>`,wrap:!1}}),St=new ee({props:{title:"StableDiffusionXLPipeline",local:"diffusers.StableDiffusionXLPipeline",headingTag:"h2"}}),Pt=new se({props:{name:"class diffusers.StableDiffusionXLPipeline",anchor:"diffusers.StableDiffusionXLPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"text_encoder_2",val:": CLIPTextModelWithProjection"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"tokenizer_2",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": KarrasDiffusionSchedulers"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"},{name:"force_zeros_for_empty_prompt",val:": bool = True"},{name:"add_watermarker",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLPipeline.vae",description:`<strong>vae</strong> (<code>AutoencoderKL</code>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionXLPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) &#x2014;
Frozen text-encoder. Stable Diffusion XL uses the text portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically
the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionXLPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code> CLIPTextModelWithProjection</code>) &#x2014;
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow">CLIP</a>,
specifically the
<a href="https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" rel="nofollow">laion/CLIP-ViT-bigG-14-laion2B-39B-b160k</a>
variant.`,name:"text_encoder_2"},{anchor:"diffusers.StableDiffusionXLPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) &#x2014;
Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionXLPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>CLIPTokenizer</code>) &#x2014;
Second Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer_2"},{anchor:"diffusers.StableDiffusionXLPipeline.unet",description:"<strong>unet</strong> (<code>UNet2DConditionModel</code>) &#x2014; Conditional U-Net architecture to denoise the encoded image latents.",name:"unet"},{anchor:"diffusers.StableDiffusionXLPipeline.scheduler",description:`<strong>scheduler</strong> (<code>SchedulerMixin</code>) &#x2014;
A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents. Can be one of
<code>DDIMScheduler</code>, <code>LMSDiscreteScheduler</code>, or <code>PNDMScheduler</code>.`,name:"scheduler"},{anchor:"diffusers.StableDiffusionXLPipeline.force_zeros_for_empty_prompt",description:`<strong>force_zeros_for_empty_prompt</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>&quot;True&quot;</code>) &#x2014;
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
<code>stabilityai/stable-diffusion-xl-base-1-0</code>.`,name:"force_zeros_for_empty_prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.add_watermarker",description:`<strong>add_watermarker</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
Whether to use the <a href="https://github.com/ShieldMnt/invisible-watermark/" rel="nofollow">invisible_watermark library</a> to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.`,name:"add_watermarker"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L163"}}),Zt=new se({props:{name:"__call__",anchor:"diffusers.StableDiffusionXLPipeline.__call__",parameters:[{name:"prompt",val:": Union = None"},{name:"prompt_2",val:": Union = None"},{name:"height",val:": Optional = None"},{name:"width",val:": Optional = None"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": List = None"},{name:"sigmas",val:": List = None"},{name:"denoising_end",val:": Optional = None"},{name:"guidance_scale",val:": float = 5.0"},{name:"negative_prompt",val:": Union = None"},{name:"negative_prompt_2",val:": Union = None"},{name:"num_images_per_prompt",val:": Optional = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": Union = None"},{name:"latents",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"ip_adapter_image",val:": Union = None"},{name:"ip_adapter_image_embeds",val:": Optional = None"},{name:"output_type",val:": Optional = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": Optional = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"original_size",val:": Optional = None"},{name:"crops_coords_top_left",val:": Tuple = (0, 0)"},{name:"target_size",val:": Optional = None"},{name:"negative_original_size",val:": Optional = None"},{name:"negative_crops_coords_top_left",val:": Tuple = (0, 0)"},{name:"negative_target_size",val:": Optional = None"},{name:"clip_skip",val:": Optional = None"},{name:"callback_on_step_end",val:": Union = None"},{name:"callback_on_step_end_tensor_inputs",val:": List = 
['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The height in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won&#x2019;t work well for
<a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-base-1.0</a>
and checkpoints that are not specifically fine-tuned on low resolutions.`,name:"height"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The width in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won&#x2019;t work well for
<a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-base-1.0</a>
and checkpoints that are not specifically fine-tuned on low resolutions.`,name:"width"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument
in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is
passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.denoising_end",description:`<strong>denoising_end</strong> (<code>float</code>, <em>optional</em>) &#x2014;
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
&#x201C;Mixture of Denoisers&#x201D; multi-pipeline setup, as elaborated in <a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output" rel="nofollow"><strong>Refining the Image
Output</strong></a>`,name:"denoising_end"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 5.0) &#x2014;
Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>.
<code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen
Paper</a>. Guidance scale is enabled by setting <code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to the text <code>prompt</code>,
usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">https://arxiv.org/abs/2010.02502</a>. Only applies to
<code>schedulers.DDIMScheduler</code>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.
ip_adapter_image &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. It should
contain the negative image embedding if <code>do_classifier_free_guidance</code> is set to <code>True</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput</code> instead
of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Guidance rescale factor proposed by <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are
Flawed</a> <code>guidance_scale</code> is defined as <code>&#x3C6;</code> in equation 16. of
<a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are Flawed</a>.
Guidance rescale factor should fix overexposure when using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.original_size",description:`<strong>original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
If <code>original_size</code> is not the same as <code>target_size</code> the image will appear to be down- or upsampled.
<code>original_size</code> defaults to <code>(height, width)</code> if not specified. Part of SDXL&#x2019;s micro-conditioning as
explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"original_size"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.crops_coords_top_left",description:`<strong>crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
<code>crops_coords_top_left</code> can be used to generate an image that appears to be &#x201C;cropped&#x201D; from the position
<code>crops_coords_top_left</code> downwards. Favorable, well-centered images are usually achieved by setting
<code>crops_coords_top_left</code> to (0, 0). Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.target_size",description:`<strong>target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
For most cases, <code>target_size</code> should be set to the desired height and width of the generated image. If
not specified it will default to <code>(height, width)</code>. Part of SDXL&#x2019;s micro-conditioning as explained in
section 2.2 of <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"target_size"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_original_size",description:`<strong>negative_original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a specific image resolution. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_original_size"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_crops_coords_top_left",description:`<strong>negative_crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.negative_target_size",description:`<strong>negative_target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a target image resolution. It should be as same
as the <code>target_size</code> for most cases. Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_target_size"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <code>PipelineCallback</code>, <code>MultiPipelineCallbacks</code>, <em>optional</em>) &#x2014;
A function or a subclass of <code>PipelineCallback</code> or <code>MultiPipelineCallbacks</code> that is called at the end of
each denoising step during the inference. with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a
list of all tensors as specified by <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.StableDiffusionXLPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L819",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput</code> if <code>return_dict</code> is True, otherwise a
<code>tuple</code>. When returning a tuple, the first element is a list with the generated images.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput</code> or <code>tuple</code></p>
`}}),ue=new Ni({props:{anchor:"diffusers.StableDiffusionXLPipeline.__call__.example",$$slots:{default:[$a]},$$scope:{ctx:p}}}),jt=new se({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt",parameters:[{name:"prompt",val:": str"},{name:"prompt_2",val:": Optional = None"},{name:"device",val:": Optional = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"negative_prompt",val:": Optional = None"},{name:"negative_prompt_2",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"lora_scale",val:": Optional = None"},{name:"clip_skip",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders
device &#x2014; (<code>torch.device</code>):
torch device`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.StableDiffusionXLPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L275"}}),Dt=new se({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.StableDiffusionXLPipeline.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLPipeline.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) &#x2014;
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.StableDiffusionXLPipeline.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014;
Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.StableDiffusionXLPipeline.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) &#x2014;
Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L754",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),Gt=new ee({props:{title:"StableDiffusionXLImg2ImgPipeline",local:"diffusers.StableDiffusionXLImg2ImgPipeline",headingTag:"h2"}}),$t=new se({props:{name:"class diffusers.StableDiffusionXLImg2ImgPipeline",anchor:"diffusers.StableDiffusionXLImg2ImgPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"text_encoder_2",val:": CLIPTextModelWithProjection"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"tokenizer_2",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": KarrasDiffusionSchedulers"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"},{name:"requires_aesthetics_score",val:": bool = False"},{name:"force_zeros_for_empty_prompt",val:": bool = True"},{name:"add_watermarker",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.vae",description:`<strong>vae</strong> (<code>AutoencoderKL</code>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) &#x2014;
Frozen text-encoder. Stable Diffusion XL uses the text portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically
the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code> CLIPTextModelWithProjection</code>) &#x2014;
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow">CLIP</a>,
specifically the
<a href="https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" rel="nofollow">laion/CLIP-ViT-bigG-14-laion2B-39B-b160k</a>
variant.`,name:"text_encoder_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) &#x2014;
Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>CLIPTokenizer</code>) &#x2014;
Second Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.unet",description:"<strong>unet</strong> (<code>UNet2DConditionModel</code>) &#x2014; Conditional U-Net architecture to denoise the encoded image latents.",name:"unet"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<code>SchedulerMixin</code>) &#x2014;
A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents. Can be one of
<code>DDIMScheduler</code>, <code>LMSDiscreteScheduler</code>, or <code>PNDMScheduler</code>.`,name:"scheduler"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.requires_aesthetics_score",description:`<strong>requires_aesthetics_score</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>&quot;False&quot;</code>) &#x2014;
Whether the <code>unet</code> requires an <code>aesthetic_score</code> condition to be passed during inference. Also see the
config of <code>stabilityai/stable-diffusion-xl-refiner-1-0</code>.`,name:"requires_aesthetics_score"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.force_zeros_for_empty_prompt",description:`<strong>force_zeros_for_empty_prompt</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>&quot;True&quot;</code>) &#x2014;
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
<code>stabilityai/stable-diffusion-xl-base-1-0</code>.`,name:"force_zeros_for_empty_prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.add_watermarker",description:`<strong>add_watermarker</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
Whether to use the <a href="https://github.com/ShieldMnt/invisible-watermark/" rel="nofollow">invisible_watermark library</a> to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.`,name:"add_watermarker"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py#L180"}}),Wt=new se({props:{name:"__call__",anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": Union = None"},{name:"prompt_2",val:": Union = None"},{name:"image",val:": Union = None"},{name:"strength",val:": float = 0.3"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": List = None"},{name:"sigmas",val:": List = None"},{name:"denoising_start",val:": Optional = None"},{name:"denoising_end",val:": Optional = None"},{name:"guidance_scale",val:": float = 5.0"},{name:"negative_prompt",val:": Union = None"},{name:"negative_prompt_2",val:": Union = None"},{name:"num_images_per_prompt",val:": Optional = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": Union = None"},{name:"latents",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"ip_adapter_image",val:": Union = None"},{name:"ip_adapter_image_embeds",val:": Optional = None"},{name:"output_type",val:": Optional = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": Optional = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"original_size",val:": Tuple = None"},{name:"crops_coords_top_left",val:": Tuple = (0, 0)"},{name:"target_size",val:": Tuple = None"},{name:"negative_original_size",val:": Optional = None"},{name:"negative_crops_coords_top_left",val:": Tuple = (0, 0)"},{name:"negative_target_size",val:": Optional = None"},{name:"aesthetic_score",val:": float = 6.0"},{name:"negative_aesthetic_score",val:": float = 2.5"},{name:"clip_skip",val:": Optional = None"},{name:"callback_on_step_end",val:": Union = 
None"},{name:"callback_on_step_end_tensor_inputs",val:": List = ['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code> or <code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>List[torch.Tensor]</code> or <code>List[PIL.Image.Image]</code> or <code>List[np.ndarray]</code>) &#x2014;
The image(s) to modify with the pipeline.`,name:"image"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.3) &#x2014;
Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code>
will be used as a starting point, adding more noise to it the larger the <code>strength</code>. The number of
denoising steps depends on the amount of noise initially added. When <code>strength</code> is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
<code>num_inference_steps</code>. A value of 1, therefore, essentially ignores <code>image</code>. Note that in the case of
<code>denoising_start</code> being declared as an integer, the value of <code>strength</code> will be ignored.`,name:"strength"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument
in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is
passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start",description:`<strong>denoising_start</strong> (<code>float</code>, <em>optional</em>) &#x2014;
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed <code>image</code> is a partly denoised image. Note that when this is specified,
strength will be ignored. The <code>denoising_start</code> parameter is particularly beneficial when this pipeline
is integrated into a &#x201C;Mixture of Denoisers&#x201D; multi-pipeline setup, as detailed in <a href="https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality" rel="nofollow"><strong>Refine Image
Quality</strong></a>.`,name:"denoising_start"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_end",description:`<strong>denoising_end</strong> (<code>float</code>, <em>optional</em>) &#x2014;
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has <code>denoising_start</code> set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a &#x201C;Mixture of Denoisers&#x201D; multi-pipeline setup, as elaborated in <a href="https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality" rel="nofollow"><strong>Refine Image
Quality</strong></a>.`,name:"denoising_end"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) &#x2014;
Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>.
<code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen
Paper</a>. Guidance scale is enabled by setting <code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to the text <code>prompt</code>,
usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">https://arxiv.org/abs/2010.02502</a>. Only applies to
<code>schedulers.DDIMScheduler</code>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.
ip_adapter_image &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. It should
contain the negative image embedding if <code>do_classifier_free_guidance</code> is set to <code>True</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput</code> instead of a
plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Guidance rescale factor proposed by <a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are
Flawed</a> <code>guidance_scale</code> is defined as <code>&#x3C6;</code> in equation 16. of
<a href="https://arxiv.org/pdf/2305.08891.pdf" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are Flawed</a>.
Guidance rescale factor should fix overexposure when using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.original_size",description:`<strong>original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
If <code>original_size</code> is not the same as <code>target_size</code> the image will appear to be down- or upsampled.
<code>original_size</code> defaults to <code>(height, width)</code> if not specified. Part of SDXL&#x2019;s micro-conditioning as
explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"original_size"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.crops_coords_top_left",description:`<strong>crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
<code>crops_coords_top_left</code> can be used to generate an image that appears to be &#x201C;cropped&#x201D; from the position
<code>crops_coords_top_left</code> downwards. Favorable, well-centered images are usually achieved by setting
<code>crops_coords_top_left</code> to (0, 0). Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.target_size",description:`<strong>target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
For most cases, <code>target_size</code> should be set to the desired height and width of the generated image. If
not specified it will default to <code>(height, width)</code>. Part of SDXL&#x2019;s micro-conditioning as explained in
section 2.2 of <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"target_size"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_original_size",description:`<strong>negative_original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a specific image resolution. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_original_size"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_crops_coords_top_left",description:`<strong>negative_crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_target_size",description:`<strong>negative_target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a target image resolution. It should be as same
as the <code>target_size</code> for most cases. Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_target_size"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.aesthetic_score",description:`<strong>aesthetic_score</strong> (<code>float</code>, <em>optional</em>, defaults to 6.0) &#x2014;
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"aesthetic_score"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.negative_aesthetic_score",description:`<strong>negative_aesthetic_score</strong> (<code>float</code>, <em>optional</em>, defaults to 2.5) &#x2014;
Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.`,name:"negative_aesthetic_score"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <code>PipelineCallback</code>, <code>MultiPipelineCallbacks</code>, <em>optional</em>) &#x2014;
A function or a subclass of <code>PipelineCallback</code> or <code>MultiPipelineCallbacks</code> that is called at the end of
each denoising step during the inference. with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a
list of all tensors as specified by <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py#L973",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput</code> if <code>return_dict</code> is True, otherwise a
\`tuple. When returning a tuple, the first element is a list with the generated images.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput</code> or <code>tuple</code></p>
`}}),be=new Ni({props:{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.__call__.example",$$slots:{default:[Wa]},$$scope:{ctx:p}}}),Nt=new se({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": str"},{name:"prompt_2",val:": Optional = None"},{name:"device",val:": Optional = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"negative_prompt",val:": Optional = None"},{name:"negative_prompt_2",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"lora_scale",val:": Optional = None"},{name:"clip_skip",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders
device &#x2014; (<code>torch.device</code>):
torch device`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py#L296"}}),zt=new se({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) &#x2014;
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014;
Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.StableDiffusionXLImg2ImgPipeline.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) &#x2014;
Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py#L904",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),Bt=new ee({props:{title:"StableDiffusionXLInpaintPipeline",local:"diffusers.StableDiffusionXLInpaintPipeline",headingTag:"h2"}}),Et=new se({props:{name:"class diffusers.StableDiffusionXLInpaintPipeline",anchor:"diffusers.StableDiffusionXLInpaintPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"text_encoder_2",val:": CLIPTextModelWithProjection"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"tokenizer_2",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": KarrasDiffusionSchedulers"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"},{name:"requires_aesthetics_score",val:": bool = False"},{name:"force_zeros_for_empty_prompt",val:": bool = True"},{name:"add_watermarker",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLInpaintPipeline.vae",description:`<strong>vae</strong> (<code>AutoencoderKL</code>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) &#x2014;
Frozen text-encoder. Stable Diffusion XL uses the text portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically
the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code> CLIPTextModelWithProjection</code>) &#x2014;
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
<a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow">CLIP</a>,
specifically the
<a href="https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" rel="nofollow">laion/CLIP-ViT-bigG-14-laion2B-39B-b160k</a>
variant.`,name:"text_encoder_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) &#x2014;
Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>CLIPTokenizer</code>) &#x2014;
Second Tokenizer of class
<a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.unet",description:"<strong>unet</strong> (<code>UNet2DConditionModel</code>) &#x2014; Conditional U-Net architecture to denoise the encoded image latents.",name:"unet"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<code>SchedulerMixin</code>) &#x2014;
A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents. Can be one of
<code>DDIMScheduler</code>, <code>LMSDiscreteScheduler</code>, or <code>PNDMScheduler</code>.`,name:"scheduler"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.requires_aesthetics_score",description:`<strong>requires_aesthetics_score</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>&quot;False&quot;</code>) &#x2014;
Whether the <code>unet</code> requires an aesthetic_score condition to be passed during inference. Also see the config
of <code>stabilityai/stable-diffusion-xl-refiner-1-0</code>.`,name:"requires_aesthetics_score"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.force_zeros_for_empty_prompt",description:`<strong>force_zeros_for_empty_prompt</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>&quot;True&quot;</code>) &#x2014;
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
<code>stabilityai/stable-diffusion-xl-base-1-0</code>.`,name:"force_zeros_for_empty_prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.add_watermarker",description:`<strong>add_watermarker</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
Whether to use the <a href="https://github.com/ShieldMnt/invisible-watermark/" rel="nofollow">invisible_watermark library</a> to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.`,name:"add_watermarker"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py#L207"}}),Rt=new se({props:{name:"__call__",anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__",parameters:[{name:"prompt",val:": Union = None"},{name:"prompt_2",val:": Union = None"},{name:"image",val:": Union = None"},{name:"mask_image",val:": Union = None"},{name:"masked_image_latents",val:": Tensor = None"},{name:"height",val:": Optional = None"},{name:"width",val:": Optional = None"},{name:"padding_mask_crop",val:": Optional = None"},{name:"strength",val:": float = 0.9999"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": List = None"},{name:"sigmas",val:": List = None"},{name:"denoising_start",val:": Optional = None"},{name:"denoising_end",val:": Optional = None"},{name:"guidance_scale",val:": float = 7.5"},{name:"negative_prompt",val:": Union = None"},{name:"negative_prompt_2",val:": Union = None"},{name:"num_images_per_prompt",val:": Optional = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": Union = None"},{name:"latents",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"ip_adapter_image",val:": Union = None"},{name:"ip_adapter_image_embeds",val:": Optional = None"},{name:"output_type",val:": Optional = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": Optional = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"original_size",val:": Tuple = None"},{name:"crops_coords_top_left",val:": Tuple = (0, 0)"},{name:"target_size",val:": Tuple = None"},{name:"negative_original_size",val:": Optional = None"},{name:"negative_crops_coords_top_left",val:": Tuple = (0, 
0)"},{name:"negative_target_size",val:": Optional = None"},{name:"aesthetic_score",val:": float = 6.0"},{name:"negative_aesthetic_score",val:": float = 2.5"},{name:"clip_skip",val:": Optional = None"},{name:"callback_on_step_end",val:": Union = None"},{name:"callback_on_step_end_tensor_inputs",val:": List = ['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>
instead.`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch which will be inpainted, <em>i.e.</em> parts of the image will
be masked out with <code>mask_image</code> and repainted according to <code>prompt</code>.`,name:"image"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code>) &#x2014;
<code>Image</code>, or tensor representing an image batch, to mask <code>image</code>. White pixels in the mask will be
repainted, while black pixels will be preserved. If <code>mask_image</code> is a PIL image, it will be converted
to a single channel (luminance) before use. If it&#x2019;s a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be <code>(B, H, W, 1)</code>.`,name:"mask_image"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The height in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won&#x2019;t work well for
<a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-base-1.0</a>
and checkpoints that are not specifically fine-tuned on low resolutions.`,name:"height"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The width in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won&#x2019;t work well for
<a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" rel="nofollow">stabilityai/stable-diffusion-xl-base-1.0</a>
and checkpoints that are not specifically fine-tuned on low resolutions.`,name:"width"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
The size of margin in the crop to be applied to the image and masking. If <code>None</code>, no crop is applied to
image and mask_image. If <code>padding_mask_crop</code> is not <code>None</code>, it will first find a rectangular region
with the same aspect ratio as the image that contains all of the masked area, and then expand that area based
on <code>padding_mask_crop</code>. The image and mask_image will then be cropped based on the expanded area before
resizing to the original image size for inpainting. This is useful when the masked area is small while
the image is large and contain information irrelevant for inpainting, such as background.`,name:"padding_mask_crop"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9999) &#x2014;
Conceptually, indicates how much to transform the masked portion of the reference <code>image</code>. Must be
between 0 and 1. <code>image</code> will be used as a starting point, adding more noise to it the larger the
<code>strength</code>. The number of denoising steps depends on the amount of noise initially added. When
<code>strength</code> is 1, added noise will be maximum and the denoising process will run for the full number of
iterations specified in <code>num_inference_steps</code>. A value of 1, therefore, essentially ignores the masked
portion of the reference <code>image</code>. Note that in the case of <code>denoising_start</code> being declared as an
integer, the value of <code>strength</code> will be ignored.`,name:"strength"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument
in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is
passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.denoising_start",description:`<strong>denoising_start</strong> (<code>float</code>, <em>optional</em>) &#x2014;
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed <code>image</code> is a partly denoised image. Note that when this is specified,
strength will be ignored. The <code>denoising_start</code> parameter is particularly beneficial when this pipeline
is integrated into a &#x201C;Mixture of Denoisers&#x201D; multi-pipeline setup, as detailed in <a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output" rel="nofollow"><strong>Refining the Image
Output</strong></a>.`,name:"denoising_start"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.denoising_end",description:`<strong>denoising_end</strong> (<code>float</code>, <em>optional</em>) &#x2014;
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has <code>denoising_start</code> set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a &#x201C;Mixture of Denoisers&#x201D; multi-pipeline setup, as elaborated in <a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output" rel="nofollow"><strong>Refining the Image
Output</strong></a>.`,name:"denoising_end"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) &#x2014;
Guidance scale as defined in <a href="https://arxiv.org/abs/2207.12598" rel="nofollow">Classifier-Free Diffusion Guidance</a>.
<code>guidance_scale</code> is defined as <code>w</code> of equation 2. of <a href="https://arxiv.org/pdf/2205.11487.pdf" rel="nofollow">Imagen
Paper</a>. Guidance scale is enabled by setting <code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to the text <code>prompt</code>,
usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.
ip_adapter_image &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. It should
contain the negative image embedding if <code>do_classifier_free_guidance</code> is set to <code>True</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">https://arxiv.org/abs/2010.02502</a>. Only applies to
<code>schedulers.DDIMScheduler</code>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>StableDiffusionPipelineOutput</code> instead of a
plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.original_size",description:`<strong>original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
If <code>original_size</code> is not the same as <code>target_size</code> the image will appear to be down- or upsampled.
<code>original_size</code> defaults to <code>(height, width)</code> if not specified. Part of SDXL&#x2019;s micro-conditioning as
explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"original_size"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.crops_coords_top_left",description:`<strong>crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
<code>crops_coords_top_left</code> can be used to generate an image that appears to be &#x201C;cropped&#x201D; from the position
<code>crops_coords_top_left</code> downwards. Favorable, well-centered images are usually achieved by setting
<code>crops_coords_top_left</code> to (0, 0). Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.target_size",description:`<strong>target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
For most cases, <code>target_size</code> should be set to the desired height and width of the generated image. If
not specified it will default to <code>(height, width)</code>. Part of SDXL&#x2019;s micro-conditioning as explained in
section 2.2 of <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"target_size"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_original_size",description:`<strong>negative_original_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a specific image resolution. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_original_size"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_crops_coords_top_left",description:`<strong>negative_crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) &#x2014;
To negatively condition the generation process based on specific crop coordinates. Part of SDXL&#x2019;s
micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_crops_coords_top_left"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_target_size",description:`<strong>negative_target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) &#x2014;
To negatively condition the generation process based on a target image resolution. It should be the same
as the <code>target_size</code> for most cases. Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. For more
information, refer to this issue thread: <a href="https://github.com/huggingface/diffusers/issues/4208" rel="nofollow">https://github.com/huggingface/diffusers/issues/4208</a>.`,name:"negative_target_size"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.aesthetic_score",description:`<strong>aesthetic_score</strong> (<code>float</code>, <em>optional</em>, defaults to 6.0) &#x2014;
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"aesthetic_score"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_aesthetic_score",description:`<strong>negative_aesthetic_score</strong> (<code>float</code>, <em>optional</em>, defaults to 2.5) &#x2014;
Part of SDXL&#x2019;s micro-conditioning as explained in section 2.2 of
<a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>. Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.`,name:"negative_aesthetic_score"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <code>PipelineCallback</code>, <code>MultiPipelineCallbacks</code>, <em>optional</em>) &#x2014;
A function or a subclass of <code>PipelineCallback</code> or <code>MultiPipelineCallbacks</code> that is called at the end of
each denoising step during the inference. with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a
list of all tensors as specified by <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py#L1078",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput</code> if <code>return_dict</code> is True, otherwise a
<code>tuple. </code>tuple. When returning a tuple, the first element is a list with the generated images.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput</code> or <code>tuple</code></p>
`}}),ye=new Ni({props:{anchor:"diffusers.StableDiffusionXLInpaintPipeline.__call__.example",$$slots:{default:[Na]},$$scope:{ctx:p}}}),Vt=new se({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:": str"},{name:"prompt_2",val:": Optional = None"},{name:"device",val:": Optional = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"negative_prompt",val:": Optional = None"},{name:"negative_prompt_2",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"lora_scale",val:": Optional = None"},{name:"clip_skip",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is
used in both text-encoders
device &#x2014; (<code>torch.device</code>):
torch device`,name:"prompt_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and
<code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.
If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code>
input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py#L400"}}),At=new se({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.StableDiffusionXLInpaintPipeline.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.StableDiffusionXLInpaintPipeline.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) &#x2014;
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014;
Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.StableDiffusionXLInpaintPipeline.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) &#x2014;
Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py#L1009",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),Ft=new ee({props:{title:"각 텍스트 인코더에 다른 프롬프트를 전달하기",local:"각-텍스트-인코더에-다른-프롬프트를-전달하기",headingTag:"h3"}}),Ht=new Q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblhMUGlwZWxpbmUlMEFpbXBvcnQlMjB0b3JjaCUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25YTFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtYmFzZS0wLjklMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMEEpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQSUyMyUyME9BSSUyMENMSVAtVmlUJTJGTC0xNCVFQyU5NyU5MCUyMHByb21wdCVFQSVCMCU4MCUyMCVFQyVBMCU4NCVFQiU4QiVBQyVFQiU5MCVBOSVFQiU4QiU4OCVFQiU4QiVBNCUwQXByb21wdCUyMCUzRCUyMCUyMkFzdHJvbmF1dCUyMGluJTIwYSUyMGp1bmdsZSUyQyUyMGNvbGQlMjBjb2xvciUyMHBhbGV0dGUlMkMlMjBtdXRlZCUyMGNvbG9ycyUyQyUyMGRldGFpbGVkJTJDJTIwOGslMjIlMEElMjMlMjBPcGVuQ0xJUC1WaVQlMkZiaWdHLTE0JUVDJTk3JTkwJTIwcHJvbXB0XzIlRUElQjAlODAlMjAlRUMlQTAlODQlRUIlOEIlQUMlRUIlOTAlQTklRUIlOEIlODglRUIlOEIlQTQlMEFwcm9tcHRfMiUyMCUzRCUyMCUyMm1vbmV0JTIwcGFpbnRpbmclMjIlMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0JTNEcHJvbXB0JTJDJTIwcHJvbXB0XzIlM0Rwcm9tcHRfMikuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionXLPipeline
<span class="hljs-keyword">import</span> torch
pipe = StableDiffusionXLPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-xl-base-0.9&quot;</span>, torch_dtype=torch.float16, variant=<span class="hljs-string">&quot;fp16&quot;</span>, use_safetensors=<span class="hljs-literal">True</span>
)
pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-comment"># OAI CLIP-ViT/L-14에 prompt가 전달됩니다</span>
prompt = <span class="hljs-string">&quot;Astronaut in a jungle, cold color palette, muted colors, detailed, 8k&quot;</span>
<span class="hljs-comment"># OpenCLIP-ViT/bigG-14에 prompt_2가 전달됩니다</span>
prompt_2 = <span class="hljs-string">&quot;monet painting&quot;</span>
image = pipe(prompt=prompt, prompt_2=prompt_2).images[<span class="hljs-number">0</span>]`,wrap:!1}}),Qt=new ka({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md"}}),{c(){t=b("meta"),o=c(),s=b("p"),a=c(),X(_.$$.fragment),l=c(),d=b("p"),d.innerHTML=I,M=c(),y=b("p"),y.textContent=f,u=c(),h=b("p"),h.innerHTML=g,x=c(),X(j.$$.fragment),E=c(),G=b("ul"),G.innerHTML=oe,ie=c(),X($.$$.fragment),R=c(),N=b("ul"),N.innerHTML=z,W=c(),X(B.$$.fragment),V=c(),D=b("p"),D.innerHTML=C,A=c(),X(te.$$.fragment),q=c(),X(de.$$.fragment),Xn=c(),Je=b("p"),Je.innerHTML=Bi,Un=c(),X(Le.$$.fragment),Sn=c(),Ce=b("p"),Ce.innerHTML=Ei,Pn=c(),Xe=b("p"),Xe.textContent=Ri,Zn=c(),X(Ue.$$.fragment),jn=c(),X(Se.$$.fragment),Dn=c(),Pe=b("p"),Pe.innerHTML=Vi,Gn=c(),X(Ze.$$.fragment),$n=c(),X(je.$$.fragment),Wn=c(),De=b("p"),De.innerHTML=Ai,Nn=c(),X(Ge.$$.fragment),zn=c(),X($e.$$.fragment),Bn=c(),We=b("p"),We.innerHTML=Fi,En=c(),X(Ne.$$.fragment),Rn=c(),X(ze.$$.fragment),Vn=c(),Be=b("p"),Be.innerHTML=Yi,An=c(),Ee=b("p"),Ee.textContent=Hi,Fn=c(),Re=b("ul"),Re.innerHTML=Qi,Yn=c(),Ve=b("p"),Ve.innerHTML=qi,Hn=c(),Ae=b("ul"),Ae.innerHTML=Oi,Qn=c(),X(Fe.$$.fragment),qn=c(),Ye=b("p"),Ye.textContent=Ki,On=c(),He=b("p"),He.textContent=es,Kn=c(),Qe=b("p"),Qe.innerHTML=ts,eo=c(),qe=b("p"),qe.innerHTML=ns,to=c(),Oe=b("p"),Oe.textContent=os,no=c(),X(Ke.$$.fragment),oo=c(),et=b("p"),et.innerHTML=is,io=c(),X(tt.$$.fragment),so=c(),nt=b("p"),nt.innerHTML=ss,ao=c(),ot=b("p"),ot.innerHTML=as,lo=c(),it=b("p"),it.innerHTML=ls,ro=c(),X(st.$$.fragment),po=c(),at=b("p"),at.textContent=rs,co=c(),lt=b("table"),lt.innerHTML=ds,mo=c(),rt=b("p"),rt.textContent=ps,fo=c(),X(fe.$$.fragment),go=c(),X(dt.$$.fragment),uo=c(),pt=b("p"),pt.innerHTML=cs,_o=c(),ct=b("p"),ct.textContent=ms,ho=c(),X(mt.$$.fragment),bo=c(),ft=b("table"),ft.innerHTML=fs,wo=c(),X(ge.$$.fragment),vo=c(),gt=b("p"),gt.textContent=gs,yo=c(),X(ut.$$.fragment),Mo=c(),_t=b("p"),_t.innerHT
ML=us,xo=c(),X(ht.$$.fragment),Io=c(),bt=b("p"),bt.innerHTML=_s,To=c(),X(wt.$$.fragment),ko=c(),X(vt.$$.fragment),Jo=c(),yt=b("p"),yt.innerHTML=hs,Lo=c(),X(Mt.$$.fragment),Co=c(),xt=b("p"),xt.textContent=bs,Xo=c(),X(It.$$.fragment),Uo=c(),X(Tt.$$.fragment),So=c(),kt=b("p"),kt.innerHTML=ws,Po=c(),X(Jt.$$.fragment),Zo=c(),X(Lt.$$.fragment),jo=c(),Ct=b("p"),Ct.innerHTML=vs,Do=c(),X(Xt.$$.fragment),Go=c(),X(Ut.$$.fragment),$o=c(),X(St.$$.fragment),Wo=c(),F=b("div"),X(Pt.$$.fragment),li=c(),nn=b("p"),nn.textContent=ys,ri=c(),on=b("p"),on.innerHTML=Ms,di=c(),sn=b("p"),sn.textContent=xs,pi=c(),an=b("ul"),an.innerHTML=Is,ci=c(),ae=b("div"),X(Zt.$$.fragment),mi=c(),ln=b("p"),ln.textContent=Ts,fi=c(),X(ue.$$.fragment),gi=c(),_e=b("div"),X(jt.$$.fragment),ui=c(),rn=b("p"),rn.textContent=ks,_i=c(),he=b("div"),X(Dt.$$.fragment),hi=c(),dn=b("p"),dn.innerHTML=Js,No=c(),X(Gt.$$.fragment),zo=c(),Y=b("div"),X($t.$$.fragment),bi=c(),pn=b("p"),pn.textContent=Ls,wi=c(),cn=b("p"),cn.innerHTML=Cs,vi=c(),mn=b("p"),mn.textContent=Xs,yi=c(),fn=b("ul"),fn.innerHTML=Us,Mi=c(),le=b("div"),X(Wt.$$.fragment),xi=c(),gn=b("p"),gn.textContent=Ss,Ii=c(),X(be.$$.fragment),Ti=c(),we=b("div"),X(Nt.$$.fragment),ki=c(),un=b("p"),un.textContent=Ps,Ji=c(),ve=b("div"),X(zt.$$.fragment),Li=c(),_n=b("p"),_n.innerHTML=Zs,Bo=c(),X(Bt.$$.fragment),Eo=c(),H=b("div"),X(Et.$$.fragment),Ci=c(),hn=b("p"),hn.textContent=js,Xi=c(),bn=b("p"),bn.innerHTML=Ds,Ui=c(),wn=b("p"),wn.textContent=Gs,Si=c(),vn=b("ul"),vn.innerHTML=$s,Pi=c(),re=b("div"),X(Rt.$$.fragment),Zi=c(),yn=b("p"),yn.textContent=Ws,ji=c(),X(ye.$$.fragment),Di=c(),Me=b("div"),X(Vt.$$.fragment),Gi=c(),Mn=b("p"),Mn.textContent=Ns,$i=c(),xe=b("div"),X(At.$$.fragment),Wi=c(),xn=b("p"),xn.innerHTML=zs,Ro=c(),X(Ft.$$.fragment),Vo=c(),Yt=b("p"),Yt.innerHTML=Bs,Ao=c(),X(Ht.$$.fragment),Fo=c(),X(Qt.$$.fragment),Yo=c(),kn=b("p"),this.h()},l(e){const 
i=Ta("svelte-u9bgzb",document.head);t=w(i,"META",{name:!0,content:!0}),i.forEach(n),o=m(e),s=w(e,"P",{}),Z(s).forEach(n),a=m(e),U(_.$$.fragment,e),l=m(e),d=w(e,"P",{"data-svelte-h":!0}),J(d)!=="svelte-dvz29e"&&(d.innerHTML=I),M=m(e),y=w(e,"P",{"data-svelte-h":!0}),J(y)!=="svelte-751ew2"&&(y.textContent=f),u=m(e),h=w(e,"P",{"data-svelte-h":!0}),J(h)!=="svelte-4msd2m"&&(h.innerHTML=g),x=m(e),U(j.$$.fragment,e),E=m(e),G=w(e,"UL",{"data-svelte-h":!0}),J(G)!=="svelte-5ytu1o"&&(G.innerHTML=oe),ie=m(e),U($.$$.fragment,e),R=m(e),N=w(e,"UL",{"data-svelte-h":!0}),J(N)!=="svelte-di92c4"&&(N.innerHTML=z),W=m(e),U(B.$$.fragment,e),V=m(e),D=w(e,"P",{"data-svelte-h":!0}),J(D)!=="svelte-1w7000q"&&(D.innerHTML=C),A=m(e),U(te.$$.fragment,e),q=m(e),U(de.$$.fragment,e),Xn=m(e),Je=w(e,"P",{"data-svelte-h":!0}),J(Je)!=="svelte-kyafz4"&&(Je.innerHTML=Bi),Un=m(e),U(Le.$$.fragment,e),Sn=m(e),Ce=w(e,"P",{"data-svelte-h":!0}),J(Ce)!=="svelte-a2n1wc"&&(Ce.innerHTML=Ei),Pn=m(e),Xe=w(e,"P",{"data-svelte-h":!0}),J(Xe)!=="svelte-165oipa"&&(Xe.textContent=Ri),Zn=m(e),U(Ue.$$.fragment,e),jn=m(e),U(Se.$$.fragment,e),Dn=m(e),Pe=w(e,"P",{"data-svelte-h":!0}),J(Pe)!=="svelte-15ehpn0"&&(Pe.innerHTML=Vi),Gn=m(e),U(Ze.$$.fragment,e),$n=m(e),U(je.$$.fragment,e),Wn=m(e),De=w(e,"P",{"data-svelte-h":!0}),J(De)!=="svelte-afe016"&&(De.innerHTML=Ai),Nn=m(e),U(Ge.$$.fragment,e),zn=m(e),U($e.$$.fragment,e),Bn=m(e),We=w(e,"P",{"data-svelte-h":!0}),J(We)!=="svelte-1aa6do"&&(We.innerHTML=Fi),En=m(e),U(Ne.$$.fragment,e),Rn=m(e),U(ze.$$.fragment,e),Vn=m(e),Be=w(e,"P",{"data-svelte-h":!0}),J(Be)!=="svelte-1g01b00"&&(Be.innerHTML=Yi),An=m(e),Ee=w(e,"P",{"data-svelte-h":!0}),J(Ee)!=="svelte-1pmtgbh"&&(Ee.textContent=Hi),Fn=m(e),Re=w(e,"UL",{"data-svelte-h":!0}),J(Re)!=="svelte-198scoo"&&(Re.innerHTML=Qi),Yn=m(e),Ve=w(e,"P",{"data-svelte-h":!0}),J(Ve)!=="svelte-1f6bpfu"&&(Ve.innerHTML=qi),Hn=m(e),Ae=w(e,"UL",{"data-svelte-h":!0}),J(Ae)!=="svelte-rlyt42"&&(Ae.innerHTML=Oi),Qn=m(e),U(Fe.$$.fragment,e),qn=m(e),Ye=w(e,"P",{"dat
a-svelte-h":!0}),J(Ye)!=="svelte-7bpd15"&&(Ye.textContent=Ki),On=m(e),He=w(e,"P",{"data-svelte-h":!0}),J(He)!=="svelte-1famj0a"&&(He.textContent=es),Kn=m(e),Qe=w(e,"P",{"data-svelte-h":!0}),J(Qe)!=="svelte-1t7r328"&&(Qe.innerHTML=ts),eo=m(e),qe=w(e,"P",{"data-svelte-h":!0}),J(qe)!=="svelte-4keux7"&&(qe.innerHTML=ns),to=m(e),Oe=w(e,"P",{"data-svelte-h":!0}),J(Oe)!=="svelte-13zf3p1"&&(Oe.textContent=os),no=m(e),U(Ke.$$.fragment,e),oo=m(e),et=w(e,"P",{"data-svelte-h":!0}),J(et)!=="svelte-14b4vpp"&&(et.innerHTML=is),io=m(e),U(tt.$$.fragment,e),so=m(e),nt=w(e,"P",{"data-svelte-h":!0}),J(nt)!=="svelte-ruiwac"&&(nt.innerHTML=ss),ao=m(e),ot=w(e,"P",{"data-svelte-h":!0}),J(ot)!=="svelte-j7iwr8"&&(ot.innerHTML=as),lo=m(e),it=w(e,"P",{"data-svelte-h":!0}),J(it)!=="svelte-xwbcec"&&(it.innerHTML=ls),ro=m(e),U(st.$$.fragment,e),po=m(e),at=w(e,"P",{"data-svelte-h":!0}),J(at)!=="svelte-1ddfnqv"&&(at.textContent=rs),co=m(e),lt=w(e,"TABLE",{"data-svelte-h":!0}),J(lt)!=="svelte-ccy4g7"&&(lt.innerHTML=ds),mo=m(e),rt=w(e,"P",{"data-svelte-h":!0}),J(rt)!=="svelte-1gsmzmg"&&(rt.textContent=ps),fo=m(e),U(fe.$$.fragment,e),go=m(e),U(dt.$$.fragment,e),uo=m(e),pt=w(e,"P",{"data-svelte-h":!0}),J(pt)!=="svelte-1erjclv"&&(pt.innerHTML=cs),_o=m(e),ct=w(e,"P",{"data-svelte-h":!0}),J(ct)!=="svelte-r9vx6d"&&(ct.textContent=ms),ho=m(e),U(mt.$$.fragment,e),bo=m(e),ft=w(e,"TABLE",{"data-svelte-h":!0}),J(ft)!=="svelte-1ien1ba"&&(ft.innerHTML=fs),wo=m(e),U(ge.$$.fragment,e),vo=m(e),gt=w(e,"P",{"data-svelte-h":!0}),J(gt)!=="svelte-bt9rrd"&&(gt.textContent=gs),yo=m(e),U(ut.$$.fragment,e),Mo=m(e),_t=w(e,"P",{"data-svelte-h":!0}),J(_t)!=="svelte-uz35qd"&&(_t.innerHTML=us),xo=m(e),U(ht.$$.fragment,e),Io=m(e),bt=w(e,"P",{"data-svelte-h":!0}),J(bt)!=="svelte-2dqdta"&&(bt.innerHTML=_s),To=m(e),U(wt.$$.fragment,e),ko=m(e),U(vt.$$.fragment,e),Jo=m(e),yt=w(e,"P",{"data-svelte-h":!0}),J(yt)!=="svelte-5zv2ts"&&(yt.innerHTML=hs),Lo=m(e),U(Mt.$$.fragment,e),Co=m(e),xt=w(e,"P",{"data-svelte-h":!0}),J(xt)!=="svelte-1uuu1
zw"&&(xt.textContent=bs),Xo=m(e),U(It.$$.fragment,e),Uo=m(e),U(Tt.$$.fragment,e),So=m(e),kt=w(e,"P",{"data-svelte-h":!0}),J(kt)!=="svelte-vijzrt"&&(kt.innerHTML=ws),Po=m(e),U(Jt.$$.fragment,e),Zo=m(e),U(Lt.$$.fragment,e),jo=m(e),Ct=w(e,"P",{"data-svelte-h":!0}),J(Ct)!=="svelte-twtngu"&&(Ct.innerHTML=vs),Do=m(e),U(Xt.$$.fragment,e),Go=m(e),U(Ut.$$.fragment,e),$o=m(e),U(St.$$.fragment,e),Wo=m(e),F=w(e,"DIV",{class:!0});var O=Z(F);U(Pt.$$.fragment,O),li=m(O),nn=w(O,"P",{"data-svelte-h":!0}),J(nn)!=="svelte-1q4yve"&&(nn.textContent=ys),ri=m(O),on=w(O,"P",{"data-svelte-h":!0}),J(on)!=="svelte-ikpvdt"&&(on.innerHTML=Ms),di=m(O),sn=w(O,"P",{"data-svelte-h":!0}),J(sn)!=="svelte-14s6m4u"&&(sn.textContent=xs),pi=m(O),an=w(O,"UL",{"data-svelte-h":!0}),J(an)!=="svelte-1p5kbtu"&&(an.innerHTML=Is),ci=m(O),ae=w(O,"DIV",{class:!0});var pe=Z(ae);U(Zt.$$.fragment,pe),mi=m(pe),ln=w(pe,"P",{"data-svelte-h":!0}),J(ln)!=="svelte-v78lg8"&&(ln.textContent=Ts),fi=m(pe),U(ue.$$.fragment,pe),pe.forEach(n),gi=m(O),_e=w(O,"DIV",{class:!0});var qt=Z(_e);U(jt.$$.fragment,qt),ui=m(qt),rn=w(qt,"P",{"data-svelte-h":!0}),J(rn)!=="svelte-16q0ax1"&&(rn.textContent=ks),qt.forEach(n),_i=m(O),he=w(O,"DIV",{class:!0});var Ot=Z(he);U(Dt.$$.fragment,Ot),hi=m(Ot),dn=w(Ot,"P",{"data-svelte-h":!0}),J(dn)!=="svelte-vo59ec"&&(dn.innerHTML=Js),Ot.forEach(n),O.forEach(n),No=m(e),U(Gt.$$.fragment,e),zo=m(e),Y=w(e,"DIV",{class:!0});var K=Z(Y);U($t.$$.fragment,K),bi=m(K),pn=w(K,"P",{"data-svelte-h":!0}),J(pn)!=="svelte-1q4yve"&&(pn.textContent=Ls),wi=m(K),cn=w(K,"P",{"data-svelte-h":!0}),J(cn)!=="svelte-ikpvdt"&&(cn.innerHTML=Cs),vi=m(K),mn=w(K,"P",{"data-svelte-h":!0}),J(mn)!=="svelte-14s6m4u"&&(mn.textContent=Xs),yi=m(K),fn=w(K,"UL",{"data-svelte-h":!0}),J(fn)!=="svelte-1p5kbtu"&&(fn.innerHTML=Us),Mi=m(K),le=w(K,"DIV",{class:!0});var 
In=Z(le);U(Wt.$$.fragment,In),xi=m(In),gn=w(In,"P",{"data-svelte-h":!0}),J(gn)!=="svelte-v78lg8"&&(gn.textContent=Ss),Ii=m(In),U(be.$$.fragment,In),In.forEach(n),Ti=m(K),we=w(K,"DIV",{class:!0});var Qo=Z(we);U(Nt.$$.fragment,Qo),ki=m(Qo),un=w(Qo,"P",{"data-svelte-h":!0}),J(un)!=="svelte-16q0ax1"&&(un.textContent=Ps),Qo.forEach(n),Ji=m(K),ve=w(K,"DIV",{class:!0});var qo=Z(ve);U(zt.$$.fragment,qo),Li=m(qo),_n=w(qo,"P",{"data-svelte-h":!0}),J(_n)!=="svelte-vo59ec"&&(_n.innerHTML=Zs),qo.forEach(n),K.forEach(n),Bo=m(e),U(Bt.$$.fragment,e),Eo=m(e),H=w(e,"DIV",{class:!0});var ne=Z(H);U(Et.$$.fragment,ne),Ci=m(ne),hn=w(ne,"P",{"data-svelte-h":!0}),J(hn)!=="svelte-1q4yve"&&(hn.textContent=js),Xi=m(ne),bn=w(ne,"P",{"data-svelte-h":!0}),J(bn)!=="svelte-ikpvdt"&&(bn.innerHTML=Ds),Ui=m(ne),wn=w(ne,"P",{"data-svelte-h":!0}),J(wn)!=="svelte-14s6m4u"&&(wn.textContent=Gs),Si=m(ne),vn=w(ne,"UL",{"data-svelte-h":!0}),J(vn)!=="svelte-1p5kbtu"&&(vn.innerHTML=$s),Pi=m(ne),re=w(ne,"DIV",{class:!0});var Tn=Z(re);U(Rt.$$.fragment,Tn),Zi=m(Tn),yn=w(Tn,"P",{"data-svelte-h":!0}),J(yn)!=="svelte-v78lg8"&&(yn.textContent=Ws),ji=m(Tn),U(ye.$$.fragment,Tn),Tn.forEach(n),Di=m(ne),Me=w(ne,"DIV",{class:!0});var Oo=Z(Me);U(Vt.$$.fragment,Oo),Gi=m(Oo),Mn=w(Oo,"P",{"data-svelte-h":!0}),J(Mn)!=="svelte-16q0ax1"&&(Mn.textContent=Ns),Oo.forEach(n),$i=m(ne),xe=w(ne,"DIV",{class:!0});var Ko=Z(xe);U(At.$$.fragment,Ko),Wi=m(Ko),xn=w(Ko,"P",{"data-svelte-h":!0}),J(xn)!=="svelte-vo59ec"&&(xn.innerHTML=zs),Ko.forEach(n),ne.forEach(n),Ro=m(e),U(Ft.$$.fragment,e),Vo=m(e),Yt=w(e,"P",{"data-svelte-h":!0}),J(Yt)!=="svelte-1rafzv7"&&(Yt.innerHTML=Bs),Ao=m(e),U(Ht.$$.fragment,e),Fo=m(e),U(Qt.$$.fragment,e),Yo=m(e),kn=w(e,"P",{}),Z(kn).forEach(n),this.h()},h(){T(t,"name","hf:doc:metadata"),T(t,"content",Ba),T(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),T(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,i){v(document.head,t),r(e,o,i),r(e,s,i),r(e,a,i),S(_,e,i),r(e,l,i),r(e,d,i),r(e,M,i),r(e,y,i),r(e,u,i),r(e,h,i),r(e,x,i),S(j,e,i),r(e,E,i),r(e,G,i),r(e,ie,i),S($,e,i),r(e,R,i),r(e,N,i),r(e,W,i),S(B,e,i),r(e,V,i),r(e,D,i),r(e,A,i),S(te,e,i),r(e,q,i),S(de,e,i),r(e,Xn,i),r(e,Je,i),r(e,Un,i),S(Le,e,i),r(e,Sn,i),r(e,Ce,i),r(e,Pn,i),r(e,Xe,i),r(e,Zn,i),S(Ue,e,i),r(e,jn,i),S(Se,e,i),r(e,Dn,i),r(e,Pe,i),r(e,Gn,i),S(Ze,e,i),r(e,$n,i),S(je,e,i),r(e,Wn,i),r(e,De,i),r(e,Nn,i),S(Ge,e,i),r(e,zn,i),S($e,e,i),r(e,Bn,i),r(e,We,i),r(e,En,i),S(Ne,e,i),r(e,Rn,i),S(ze,e,i),r(e,Vn,i),r(e,Be,i),r(e,An,i),r(e,Ee,i),r(e,Fn,i),r(e,Re,i),r(e,Yn,i),r(e,Ve,i),r(e,Hn,i),r(e,Ae,i),r(e,Qn,i),S(Fe,e,i),r(e,qn,i),r(e,Ye,i),r(e,On,i),r(e,He,i),r(e,Kn,i),r(e,Qe,i),r(e,eo,i),r(e,qe,i),r(e,to,i),r(e,Oe,i),r(e,no,i),S(Ke,e,i),r(e,oo,i),r(e,et,i),r(e,io,i),S(tt,e,i),r(e,so,i),r(e,nt,i),r(e,ao,i),r(e,ot,i),r(e,lo,i),r(e,it,i),r(e,ro,i),S(st,e,i),r(e,po,i),r(e,at,i),r(e,co,i),r(e,lt,i),r(e,mo,i),r(e,rt,i),r(e,fo,i),S(
fe,e,i),r(e,go,i),S(dt,e,i),r(e,uo,i),r(e,pt,i),r(e,_o,i),r(e,ct,i),r(e,ho,i),S(mt,e,i),r(e,bo,i),r(e,ft,i),r(e,wo,i),S(ge,e,i),r(e,vo,i),r(e,gt,i),r(e,yo,i),S(ut,e,i),r(e,Mo,i),r(e,_t,i),r(e,xo,i),S(ht,e,i),r(e,Io,i),r(e,bt,i),r(e,To,i),S(wt,e,i),r(e,ko,i),S(vt,e,i),r(e,Jo,i),r(e,yt,i),r(e,Lo,i),S(Mt,e,i),r(e,Co,i),r(e,xt,i),r(e,Xo,i),S(It,e,i),r(e,Uo,i),S(Tt,e,i),r(e,So,i),r(e,kt,i),r(e,Po,i),S(Jt,e,i),r(e,Zo,i),S(Lt,e,i),r(e,jo,i),r(e,Ct,i),r(e,Do,i),S(Xt,e,i),r(e,Go,i),S(Ut,e,i),r(e,$o,i),S(St,e,i),r(e,Wo,i),r(e,F,i),S(Pt,F,null),v(F,li),v(F,nn),v(F,ri),v(F,on),v(F,di),v(F,sn),v(F,pi),v(F,an),v(F,ci),v(F,ae),S(Zt,ae,null),v(ae,mi),v(ae,ln),v(ae,fi),S(ue,ae,null),v(F,gi),v(F,_e),S(jt,_e,null),v(_e,ui),v(_e,rn),v(F,_i),v(F,he),S(Dt,he,null),v(he,hi),v(he,dn),r(e,No,i),S(Gt,e,i),r(e,zo,i),r(e,Y,i),S($t,Y,null),v(Y,bi),v(Y,pn),v(Y,wi),v(Y,cn),v(Y,vi),v(Y,mn),v(Y,yi),v(Y,fn),v(Y,Mi),v(Y,le),S(Wt,le,null),v(le,xi),v(le,gn),v(le,Ii),S(be,le,null),v(Y,Ti),v(Y,we),S(Nt,we,null),v(we,ki),v(we,un),v(Y,Ji),v(Y,ve),S(zt,ve,null),v(ve,Li),v(ve,_n),r(e,Bo,i),S(Bt,e,i),r(e,Eo,i),r(e,H,i),S(Et,H,null),v(H,Ci),v(H,hn),v(H,Xi),v(H,bn),v(H,Ui),v(H,wn),v(H,Si),v(H,vn),v(H,Pi),v(H,re),S(Rt,re,null),v(re,Zi),v(re,yn),v(re,ji),S(ye,re,null),v(H,Di),v(H,Me),S(Vt,Me,null),v(Me,Gi),v(Me,Mn),v(H,$i),v(H,xe),S(At,xe,null),v(xe,Wi),v(xe,xn),r(e,Ro,i),S(Ft,e,i),r(e,Vo,i),r(e,Yt,i),r(e,Ao,i),S(Ht,e,i),r(e,Fo,i),S(Qt,e,i),r(e,Yo,i),r(e,kn,i),Ho=!0},p(e,[i]){const O={};i&2&&(O.$$scope={dirty:i,ctx:e}),fe.$set(O);const pe={};i&2&&(pe.$$scope={dirty:i,ctx:e}),ge.$set(pe);const qt={};i&2&&(qt.$$scope={dirty:i,ctx:e}),ue.$set(qt);const Ot={};i&2&&(Ot.$$scope={dirty:i,ctx:e}),be.$set(Ot);const 
K={};i&2&&(K.$$scope={dirty:i,ctx:e}),ye.$set(K)},i(e){Ho||(k(_.$$.fragment,e),k(j.$$.fragment,e),k($.$$.fragment,e),k(B.$$.fragment,e),k(te.$$.fragment,e),k(de.$$.fragment,e),k(Le.$$.fragment,e),k(Ue.$$.fragment,e),k(Se.$$.fragment,e),k(Ze.$$.fragment,e),k(je.$$.fragment,e),k(Ge.$$.fragment,e),k($e.$$.fragment,e),k(Ne.$$.fragment,e),k(ze.$$.fragment,e),k(Fe.$$.fragment,e),k(Ke.$$.fragment,e),k(tt.$$.fragment,e),k(st.$$.fragment,e),k(fe.$$.fragment,e),k(dt.$$.fragment,e),k(mt.$$.fragment,e),k(ge.$$.fragment,e),k(ut.$$.fragment,e),k(ht.$$.fragment,e),k(wt.$$.fragment,e),k(vt.$$.fragment,e),k(Mt.$$.fragment,e),k(It.$$.fragment,e),k(Tt.$$.fragment,e),k(Jt.$$.fragment,e),k(Lt.$$.fragment,e),k(Xt.$$.fragment,e),k(Ut.$$.fragment,e),k(St.$$.fragment,e),k(Pt.$$.fragment,e),k(Zt.$$.fragment,e),k(ue.$$.fragment,e),k(jt.$$.fragment,e),k(Dt.$$.fragment,e),k(Gt.$$.fragment,e),k($t.$$.fragment,e),k(Wt.$$.fragment,e),k(be.$$.fragment,e),k(Nt.$$.fragment,e),k(zt.$$.fragment,e),k(Bt.$$.fragment,e),k(Et.$$.fragment,e),k(Rt.$$.fragment,e),k(ye.$$.fragment,e),k(Vt.$$.fragment,e),k(At.$$.fragment,e),k(Ft.$$.fragment,e),k(Ht.$$.fragment,e),k(Qt.$$.fragment,e),Ho=!0)},o(e){L(_.$$.fragment,e),L(j.$$.fragment,e),L($.$$.fragment,e),L(B.$$.fragment,e),L(te.$$.fragment,e),L(de.$$.fragment,e),L(Le.$$.fragment,e),L(Ue.$$.fragment,e),L(Se.$$.fragment,e),L(Ze.$$.fragment,e),L(je.$$.fragment,e),L(Ge.$$.fragment,e),L($e.$$.fragment,e),L(Ne.$$.fragment,e),L(ze.$$.fragment,e),L(Fe.$$.fragment,e),L(Ke.$$.fragment,e),L(tt.$$.fragment,e),L(st.$$.fragment,e),L(fe.$$.fragment,e),L(dt.$$.fragment,e),L(mt.$$.fragment,e),L(ge.$$.fragment,e),L(ut.$$.fragment,e),L(ht.$$.fragment,e),L(wt.$$.fragment,e),L(vt.$$.fragment,e),L(Mt.$$.fragment,e),L(It.$$.fragment,e),L(Tt.$$.fragment,e),L(Jt.$$.fragment,e),L(Lt.$$.fragment,e),L(Xt.$$.fragment,e),L(Ut.$$.fragment,e),L(St.$$.fragment,e),L(Pt.$$.fragment,e),L(Zt.$$.fragment,e),L(ue.$$.fragment,e),L(jt.$$.fragment,e),L(Dt.$$.fragment,e),L(Gt.$$.fragment,e),L($t.$$.fragmen
t,e),L(Wt.$$.fragment,e),L(be.$$.fragment,e),L(Nt.$$.fragment,e),L(zt.$$.fragment,e),L(Bt.$$.fragment,e),L(Et.$$.fragment,e),L(Rt.$$.fragment,e),L(ye.$$.fragment,e),L(Vt.$$.fragment,e),L(At.$$.fragment,e),L(Ft.$$.fragment,e),L(Ht.$$.fragment,e),L(Qt.$$.fragment,e),Ho=!1},d(e){e&&(n(o),n(s),n(a),n(l),n(d),n(M),n(y),n(u),n(h),n(x),n(E),n(G),n(ie),n(R),n(N),n(W),n(V),n(D),n(A),n(q),n(Xn),n(Je),n(Un),n(Sn),n(Ce),n(Pn),n(Xe),n(Zn),n(jn),n(Dn),n(Pe),n(Gn),n($n),n(Wn),n(De),n(Nn),n(zn),n(Bn),n(We),n(En),n(Rn),n(Vn),n(Be),n(An),n(Ee),n(Fn),n(Re),n(Yn),n(Ve),n(Hn),n(Ae),n(Qn),n(qn),n(Ye),n(On),n(He),n(Kn),n(Qe),n(eo),n(qe),n(to),n(Oe),n(no),n(oo),n(et),n(io),n(so),n(nt),n(ao),n(ot),n(lo),n(it),n(ro),n(po),n(at),n(co),n(lt),n(mo),n(rt),n(fo),n(go),n(uo),n(pt),n(_o),n(ct),n(ho),n(bo),n(ft),n(wo),n(vo),n(gt),n(yo),n(Mo),n(_t),n(xo),n(Io),n(bt),n(To),n(ko),n(Jo),n(yt),n(Lo),n(Co),n(xt),n(Xo),n(Uo),n(So),n(kt),n(Po),n(Zo),n(jo),n(Ct),n(Do),n(Go),n($o),n(Wo),n(F),n(No),n(zo),n(Y),n(Bo),n(Eo),n(H),n(Ro),n(Vo),n(Yt),n(Ao),n(Fo),n(Yo),n(kn)),n(t),P(_,e),P(j,e),P($,e),P(B,e),P(te,e),P(de,e),P(Le,e),P(Ue,e),P(Se,e),P(Ze,e),P(je,e),P(Ge,e),P($e,e),P(Ne,e),P(ze,e),P(Fe,e),P(Ke,e),P(tt,e),P(st,e),P(fe,e),P(dt,e),P(mt,e),P(ge,e),P(ut,e),P(ht,e),P(wt,e),P(vt,e),P(Mt,e),P(It,e),P(Tt,e),P(Jt,e),P(Lt,e),P(Xt,e),P(Ut,e),P(St,e),P(Pt),P(Zt),P(ue),P(jt),P(Dt),P(Gt,e),P($t),P(Wt),P(be),P(Nt),P(zt),P(Bt,e),P(Et),P(Rt),P(ye),P(Vt),P(At),P(Ft,e),P(Ht,e),P(Qt,e)}}}const Ba='{"title":"Stable diffusion XL","local":"stable-diffusion-xl","sections":[{"title":"팁","local":"팁","sections":[{"title":"이용가능한 체크포인트:","local":"이용가능한-체크포인트","sections":[],"depth":3}],"depth":2},{"title":"사용 예시","local":"사용-예시","sections":[{"title":"워터마커","local":"워터마커","sections":[],"depth":3},{"title":"Text-to-Image","local":"text-to-image","sections":[],"depth":3},{"title":"Image-to-image","local":"image-to-image","sections":[],"depth":3},{"title":"인페인팅","local":"인페인팅","sections":[],"depth":3},{"title":"이미지 결과물을 
정제하기","local":"이미지-결과물을-정제하기","sections":[{"title":"1.) Denoisers의 앙상블","local":"1-denoisers의-앙상블","sections":[],"depth":4},{"title":"2.) 노이즈가 완전히 제거된 기본 이미지에서 이미지 출력을 정제하기","local":"2-노이즈가-완전히-제거된-기본-이미지에서-이미지-출력을-정제하기","sections":[],"depth":4}],"depth":3},{"title":"단독 체크포인트 파일 / 원래의 파일 형식으로 불러오기","local":"단독-체크포인트-파일--원래의-파일-형식으로-불러오기","sections":[],"depth":3},{"title":"모델 offloading을 통해 메모리 최적화하기","local":"모델-offloading을-통해-메모리-최적화하기","sections":[],"depth":3},{"title":"torch.compile 로 추론 속도를 올리기","local":"torchcompile-로-추론-속도를-올리기","sections":[],"depth":3},{"title":"torch &lt; 2.0 일 때 실행하기","local":"torch-lt-20-일-때-실행하기","sections":[],"depth":3}],"depth":2},{"title":"StableDiffusionXLPipeline","local":"diffusers.StableDiffusionXLPipeline","sections":[],"depth":2},{"title":"StableDiffusionXLImg2ImgPipeline","local":"diffusers.StableDiffusionXLImg2ImgPipeline","sections":[],"depth":2},{"title":"StableDiffusionXLInpaintPipeline","local":"diffusers.StableDiffusionXLInpaintPipeline","sections":[{"title":"각 텍스트 인코더에 다른 프롬프트를 전달하기","local":"각-텍스트-인코더에-다른-프롬프트를-전달하기","sections":[],"depth":3}],"depth":2}],"depth":1}';function Ea(p){return zi(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class qa extends oi{constructor(t){super(),ii(this,t,Ea,za,ni,{})}}export{qa as component};

Xet Storage Details

Size:
200 kB
·
Xet hash:
6a8e3b4c4cb8fc1aef878f7198a0beb3beace24533a3d725c81c53eb4e5554e4

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.