Buckets:

rtrm's picture
download
raw
7.77 kB
// NOTE(review): auto-generated, minified SvelteKit build output for the Korean
// "Text-guided depth-to-image" Diffusers documentation page. Do NOT hand-edit;
// regenerate from the source markdown (docs/source/ko/using-diffusers/depth2img.md)
// via the docs build pipeline instead. Comments below only annotate structure.
// Imports: scheduler/runtime helpers, Svelte internal DOM ops (minified aliases),
// and doc components — C/H/E (chart, heading, edit-link), CodeBlock, and the
// notebook dropdown.
import{s as te,a as ae,n as le,o as se}from"../chunks/scheduler.23542ac5.js";import{S as ie,i as ne,e as p,s,c as $,h as oe,a as m,d as a,b as i,f as Y,g as T,j as U,k as j,l as pe,m as l,n as J,t as _,o as k,p as v}from"../chunks/index.9b1f405b.js";import{C as me,H as re,E as de}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.7d2ec626.js";import{C as ee}from"../chunks/CodeBlock.b2a9acb5.js";import{D as ce}from"../chunks/DocNotebookDropdown.68a629d2.js";
// fe: compiled fragment factory. Builds the page DOM — head <meta>, headings,
// innerHTML paragraphs (Korean doc text), two CodeBlock instances whose `code`
// props are base64url-encoded Python samples with pre-highlighted HTML
// (the template literals below intentionally contain raw newlines — do not
// re-wrap them), an input/output comparison <table>, and an embedded HF Space
// <iframe>. Returns the Svelte create/claim/hydrate/mount/update/intro/outro/
// destroy lifecycle object ({c,l,h,m,p,i,o,d}).
function fe(q){let o,W,B,I,r,Q,d,x,c,X,f,z='<code>StableDiffusionDepth2ImgPipeline</code>을 사용하면 텍스트 프롬프트와 초기 이미지를 전달하여 새 이미지의 생성을 조절할 수 있습니다. 또한 이미지 구조를 보존하기 위해 <code>depth_map</code>을 전달할 수도 있습니다. <code>depth_map</code>이 제공되지 않으면 파이프라인은 통합된 <a href="https://github.com/isl-org/MiDaS" rel="nofollow">depth-estimation model</a>을 통해 자동으로 깊이를 예측합니다.',E,u,A="먼저 <code>StableDiffusionDepth2ImgPipeline</code>의 인스턴스를 생성합니다:",G,g,H,h,N="이제 프롬프트를 파이프라인에 전달합니다. 특정 단어가 이미지 생성을 가이드 하는것을 방지하기 위해 <code>negative_prompt</code>를 전달할 수도 있습니다:",S,b,C,M,V='<thead><tr><th>Input</th> <th>Output</th></tr></thead> <tbody><tr><td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/></td> <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/></td></tr></tbody>',L,w,K="아래의 Spaces를 가지고 놀며 depth map이 있는 이미지와 없는 이미지의 차이가 있는지 확인해 보세요!",D,n,O,F,y,P,Z,R;return r=new me({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),d=new ce({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 
10;",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/depth2img.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/pytorch/depth2img.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/tensorflow/depth2img.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/depth2img.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/pytorch/depth2img.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/tensorflow/depth2img.ipynb"}]}}),c=new re({props:{title:"Text-guided depth-to-image 생성",local:"text-guided-depth-to-image-생성",headingTag:"h1"}}),g=new ee({props:{code:"aW1wb3J0JTIwdG9yY2glMEFpbXBvcnQlMjByZXF1ZXN0cyUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQSUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBTdGFibGVEaWZmdXNpb25EZXB0aDJJbWdQaXBlbGluZSUwQSUwQXBpcGUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25EZXB0aDJJbWdQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLTItZGVwdGglMjIlMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMEEpLnRvKCUyMmN1ZGElMjIp",highlighted:`<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">import</span> requests
<span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionDepth2ImgPipeline
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
<span class="hljs-string">&quot;stabilityai/stable-diffusion-2-depth&quot;</span>,
torch_dtype=torch.float16,
).to(<span class="hljs-string">&quot;cuda&quot;</span>)`,wrap:!1}}),b=new ee({props:{code:"dXJsJTIwJTNEJTIwJTIyaHR0cCUzQSUyRiUyRmltYWdlcy5jb2NvZGF0YXNldC5vcmclMkZ2YWwyMDE3JTJGMDAwMDAwMDM5NzY5LmpwZyUyMiUwQWluaXRfaW1hZ2UlMjAlM0QlMjBJbWFnZS5vcGVuKHJlcXVlc3RzLmdldCh1cmwlMkMlMjBzdHJlYW0lM0RUcnVlKS5yYXcpJTBBcHJvbXB0JTIwJTNEJTIwJTIydHdvJTIwdGlnZXJzJTIyJTBBbl9wcm9tcHQlMjAlM0QlMjAlMjJiYWQlMkMlMjBkZWZvcm1lZCUyQyUyMHVnbHklMkMlMjBiYWQlMjBhbmF0b215JTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMG5lZ2F0aXZlX3Byb21wdCUzRG5fcHJvbXB0JTJDJTIwc3RyZW5ndGglM0QwLjcpLmltYWdlcyU1QjAlNUQlMEFpbWFnZQ==",highlighted:`url = <span class="hljs-string">&quot;http://images.cocodataset.org/val2017/000000039769.jpg&quot;</span>
init_image = Image.<span class="hljs-built_in">open</span>(requests.get(url, stream=<span class="hljs-literal">True</span>).raw)
prompt = <span class="hljs-string">&quot;two tigers&quot;</span>
n_prompt = <span class="hljs-string">&quot;bad, deformed, ugly, bad anatomy&quot;</span>
image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=<span class="hljs-number">0.7</span>).images[<span class="hljs-number">0</span>]
image`,wrap:!1}}),y=new de({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/ko/using-diffusers/depth2img.md"}}),{c(){o=p("meta"),W=s(),B=p("p"),I=s(),$(r.$$.fragment),Q=s(),$(d.$$.fragment),x=s(),$(c.$$.fragment),X=s(),f=p("p"),f.innerHTML=z,E=s(),u=p("p"),u.innerHTML=A,G=s(),$(g.$$.fragment),H=s(),h=p("p"),h.innerHTML=N,S=s(),$(b.$$.fragment),C=s(),M=p("table"),M.innerHTML=V,L=s(),w=p("p"),w.textContent=K,D=s(),n=p("iframe"),F=s(),$(y.$$.fragment),P=s(),Z=p("p"),this.h()},l(e){const t=oe("svelte-u9bgzb",document.head);o=m(t,"META",{name:!0,content:!0}),t.forEach(a),W=i(e),B=m(e,"P",{}),Y(B).forEach(a),I=i(e),T(r.$$.fragment,e),Q=i(e),T(d.$$.fragment,e),x=i(e),T(c.$$.fragment,e),X=i(e),f=m(e,"P",{"data-svelte-h":!0}),U(f)!=="svelte-1nwb6xh"&&(f.innerHTML=z),E=i(e),u=m(e,"P",{"data-svelte-h":!0}),U(u)!=="svelte-b7rzk"&&(u.innerHTML=A),G=i(e),T(g.$$.fragment,e),H=i(e),h=m(e,"P",{"data-svelte-h":!0}),U(h)!=="svelte-1ospw27"&&(h.innerHTML=N),S=i(e),T(b.$$.fragment,e),C=i(e),M=m(e,"TABLE",{"data-svelte-h":!0}),U(M)!=="svelte-175x2as"&&(M.innerHTML=V),L=i(e),w=m(e,"P",{"data-svelte-h":!0}),U(w)!=="svelte-1vexnbo"&&(w.textContent=K),D=i(e),n=m(e,"IFRAME",{src:!0,frameborder:!0,width:!0,height:!0}),Y(n).forEach(a),F=i(e),T(y.$$.fragment,e),P=i(e),Z=m(e,"P",{}),Y(Z).forEach(a),this.h()},h(){j(o,"name","hf:doc:metadata"),j(o,"content",ue),ae(n.src,O="https://radames-stable-diffusion-depth2img.hf.space")||j(n,"src",O),j(n,"frameborder","0"),j(n,"width","850"),j(n,"height","500")},m(e,t){pe(document.head,o),l(e,W,t),l(e,B,t),l(e,I,t),J(r,e,t),l(e,Q,t),J(d,e,t),l(e,x,t),J(c,e,t),l(e,X,t),l(e,f,t),l(e,E,t),l(e,u,t),l(e,G,t),J(g,e,t),l(e,H,t),l(e,h,t),l(e,S,t),J(b,e,t),l(e,C,t),l(e,M,t),l(e,L,t),l(e,w,t),l(e,D,t),l(e,n,t),l(e,F,t),J(y,e,t),l(e,P,t),l(e,Z,t),R=!0},p:le,i(e){R||(_(r.$$.fragment,e),_(d.$$.fragment,e),_(c.$$.fragment,e),_(g.$$.fragment,e),_(b.$$.fragment,e),_(y.$$.fragment,e),R=!0)},o(e){k(r.$$.fragment,e),k(d.$$.fragment,e),k(c.$$.fragme
nt,e),k(g.$$.fragment,e),k(b.$$.fragment,e),k(y.$$.fragment,e),R=!1},d(e){e&&(a(W),a(B),a(I),a(Q),a(x),a(X),a(f),a(E),a(u),a(G),a(H),a(h),a(S),a(C),a(M),a(L),a(w),a(D),a(n),a(F),a(P),a(Z)),a(o),v(r,e),v(d,e),v(c,e),v(g,e),v(b,e),v(y,e)}}}
// ue: serialized page metadata written into <meta name="hf:doc:metadata"> by h() above.
const ue='{"title":"Text-guided depth-to-image 생성","local":"text-guided-depth-to-image-생성","sections":[],"depth":1}';
// ge: compiled <script> instance factory — on mount, reads the `fw` (framework)
// query parameter; the value is currently unused here. No reactive props.
function ge(q){return se(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}
// $e: the page component class; wires instance (ge) and fragment (fe) factories
// into the Svelte component base class.
class $e extends ie{constructor(o){super(),ne(this,o,ge,fe,te,{})}}
// SvelteKit route entry point.
export{$e as component};

Xet Storage Details

Size:
7.77 kB
·
Xet hash:
3b8ef4775d4b5fa1850619d7a2d8fa760b332740275503447d3df405dbbea25a

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.