Buckets:

rtrm's picture
download
raw
11.9 kB
import{s as ye,n as be,o as ge}from"../chunks/scheduler.94020406.js";import{S as we,i as Ue,g as i,s as n,r,E as he,h as o,f as t,c as a,j as de,u as c,x as m,k as ne,y as _e,a as l,v as u,d as M,t as f,w as d}from"../chunks/index.a08c8d92.js";import{C as S}from"../chunks/CodeBlock.b23cf525.js";import{D as je}from"../chunks/DocNotebookDropdown.d8a25975.js";import{H as Te,E as ve}from"../chunks/EditOnGithub.b1bceb47.js";function Je(ae){let p,I,x,X,b,z,g,H,w,ie='<code>StableDiffusionPipeline</code>은 textual-inversion을 지원하는데, 이는 몇 개의 샘플 이미지만으로 stable diffusion과 같은 모델이 새로운 컨셉을 학습할 수 있도록 하는 기법입니다. 이를 통해 생성된 이미지를 더 잘 제어하고 특정 컨셉에 맞게 모델을 조정할 수 있습니다. 커뮤니티에서 만들어진 컨셉들의 컬렉션은 <a href="https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer" rel="nofollow">Stable Diffusion Conceptualizer</a>를 통해 빠르게 사용해볼 수 있습니다.',F,U,oe='이 가이드에서는 Stable Diffusion Conceptualizer에서 사전학습한 컨셉을 사용하여 textual-inversion으로 추론을 실행하는 방법을 보여드립니다. textual-inversion으로 모델에 새로운 컨셉을 학습시키는 데 관심이 있으시다면, <a href="./training/text_inversion">Textual Inversion</a> 훈련 가이드를 참조하세요.',R,h,pe="Hugging Face 계정으로 로그인하세요:",E,_,N,j,re="필요한 라이브러리를 불러오고 생성된 이미지를 시각화하기 위한 도우미 함수 <code>image_grid</code>를 만듭니다:",Y,T,P,v,ce='Stable Diffusion과 <a href="https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer" rel="nofollow">Stable Diffusion Conceptualizer</a>에서 사전학습된 컨셉을 선택합니다:',L,J,V,C,me="이제 파이프라인을 로드하고 사전학습된 컨셉을 파이프라인에 전달할 수 있습니다:",D,k,K,$,ue="특별한 placeholder token ’<code>&lt;cat-toy&gt;</code>‘를 사용하여 사전학습된 컨셉으로 프롬프트를 만들고, 생성할 샘플의 수와 이미지 행의 수를 선택합니다:",q,W,A,B,Me="그런 다음 파이프라인을 실행하고, 생성된 이미지들을 저장합니다. 그리고 처음에 만들었던 도우미 함수 <code>image_grid</code>를 사용하여 생성 결과들을 시각화합니다. 이 때 <code>num_inference_steps</code>와 <code>guidance_scale</code>과 같은 매개 변수들을 조정하여, 이것들이 이미지 품질에 어떠한 영향을 미치는지를 자유롭게 확인해보시기 바랍니다.",O,Q,ee,y,fe='<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png"/>',se,Z,te,G,le;return b=new Te({props:{title:"Textual inversion",local:"textual-inversion",headingTag:"h1"}}),g=new je({props:{classNames:"absolute z-10 right-0 top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/textual_inversion_inference.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/pytorch/textual_inversion_inference.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers_doc/ko/tensorflow/textual_inversion_inference.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/textual_inversion_inference.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/pytorch/textual_inversion_inference.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/diffusers_doc/ko/tensorflow/textual_inversion_inference.ipynb"}]}}),_=new S({props:{code:"ZnJvbSUyMGh1Z2dpbmdmYWNlX2h1YiUyMGltcG9ydCUyMG5vdGVib29rX2xvZ2luJTBBJTBBbm90ZWJvb2tfbG9naW4oKQ==",highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> notebook_login
notebook_login()`,wrap:!1}}),T=new S({props:{code:"aW1wb3J0JTIwb3MlMEFpbXBvcnQlMjB0b3JjaCUwQSUwQWltcG9ydCUyMFBJTCUwQWZyb20lMjBQSUwlMjBpbXBvcnQlMjBJbWFnZSUwQSUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBTdGFibGVEaWZmdXNpb25QaXBlbGluZSUwQWZyb20lMjB0cmFuc2Zvcm1lcnMlMjBpbXBvcnQlMjBDTElQSW1hZ2VQcm9jZXNzb3IlMkMlMjBDTElQVGV4dE1vZGVsJTJDJTIwQ0xJUFRva2VuaXplciUwQSUwQSUwQWRlZiUyMGltYWdlX2dyaWQoaW1ncyUyQyUyMHJvd3MlMkMlMjBjb2xzKSUzQSUwQSUyMCUyMCUyMCUyMGFzc2VydCUyMGxlbihpbWdzKSUyMCUzRCUzRCUyMHJvd3MlMjAqJTIwY29scyUwQSUwQSUyMCUyMCUyMCUyMHclMkMlMjBoJTIwJTNEJTIwaW1ncyU1QjAlNUQuc2l6ZSUwQSUyMCUyMCUyMCUyMGdyaWQlMjAlM0QlMjBJbWFnZS5uZXcoJTIyUkdCJTIyJTJDJTIwc2l6ZSUzRChjb2xzJTIwKiUyMHclMkMlMjByb3dzJTIwKiUyMGgpKSUwQSUyMCUyMCUyMCUyMGdyaWRfdyUyQyUyMGdyaWRfaCUyMCUzRCUyMGdyaWQuc2l6ZSUwQSUwQSUyMCUyMCUyMCUyMGZvciUyMGklMkMlMjBpbWclMjBpbiUyMGVudW1lcmF0ZShpbWdzKSUzQSUwQSUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMGdyaWQucGFzdGUoaW1nJTJDJTIwYm94JTNEKGklMjAlMjUlMjBjb2xzJTIwKiUyMHclMkMlMjBpJTIwJTJGJTJGJTIwY29scyUyMColMjBoKSklMEElMjAlMjAlMjAlMjByZXR1cm4lMjBncmlk",highlighted:`<span class="hljs-keyword">import</span> os
<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">import</span> PIL
<span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline
<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
<span class="hljs-keyword">def</span> <span class="hljs-title function_">image_grid</span>(<span class="hljs-params">imgs, rows, cols</span>):
<span class="hljs-keyword">assert</span> <span class="hljs-built_in">len</span>(imgs) == rows * cols
w, h = imgs[<span class="hljs-number">0</span>].size
grid = Image.new(<span class="hljs-string">&quot;RGB&quot;</span>, size=(cols * w, rows * h))
grid_w, grid_h = grid.size
<span class="hljs-keyword">for</span> i, img <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
<span class="hljs-keyword">return</span> grid`,wrap:!1}}),J=new S({props:{code:"cHJldHJhaW5lZF9tb2RlbF9uYW1lX29yX3BhdGglMjAlM0QlMjAlMjJydW53YXltbCUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiUwQXJlcG9faWRfZW1iZWRzJTIwJTNEJTIwJTIyc2QtY29uY2VwdHMtbGlicmFyeSUyRmNhdC10b3klMjI=",highlighted:`pretrained_model_name_or_path = <span class="hljs-string">&quot;runwayml/stable-diffusion-v1-5&quot;</span>
repo_id_embeds = <span class="hljs-string">&quot;sd-concepts-library/cat-toy&quot;</span>`,wrap:!1}}),k=new S({props:{code:"cGlwZWxpbmUlMjAlM0QlMjBTdGFibGVEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQocHJldHJhaW5lZF9tb2RlbF9uYW1lX29yX3BhdGglMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYpLnRvKCUyMmN1ZGElMjIpJTBBJTBBcGlwZWxpbmUubG9hZF90ZXh0dWFsX2ludmVyc2lvbihyZXBvX2lkX2VtYmVkcyk=",highlighted:`pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to(<span class="hljs-string">&quot;cuda&quot;</span>)
pipeline.load_textual_inversion(repo_id_embeds)`,wrap:!1}}),W=new S({props:{code:"cHJvbXB0JTIwJTNEJTIwJTIyYSUyMGdyYWZpdHRpJTIwaW4lMjBhJTIwZmF2ZWxhJTIwd2FsbCUyMHdpdGglMjBhJTIwJTNDY2F0LXRveSUzRSUyMG9uJTIwaXQlMjIlMEElMEFudW1fc2FtcGxlcyUyMCUzRCUyMDIlMEFudW1fcm93cyUyMCUzRCUyMDI=",highlighted:`prompt = <span class="hljs-string">&quot;a grafitti in a favela wall with a &lt;cat-toy&gt; on it&quot;</span>
num_samples = <span class="hljs-number">2</span>
num_rows = <span class="hljs-number">2</span>`,wrap:!1}}),Q=new S({props:{code:"YWxsX2ltYWdlcyUyMCUzRCUyMCU1QiU1RCUwQWZvciUyMF8lMjBpbiUyMHJhbmdlKG51bV9yb3dzKSUzQSUwQSUyMCUyMCUyMCUyMGltYWdlcyUyMCUzRCUyMHBpcGUocHJvbXB0JTJDJTIwbnVtX2ltYWdlc19wZXJfcHJvbXB0JTNEbnVtX3NhbXBsZXMlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMjBndWlkYW5jZV9zY2FsZSUzRDcuNSkuaW1hZ2VzJTBBJTIwJTIwJTIwJTIwYWxsX2ltYWdlcy5leHRlbmQoaW1hZ2VzKSUwQSUwQWdyaWQlMjAlM0QlMjBpbWFnZV9ncmlkKGFsbF9pbWFnZXMlMkMlMjBudW1fc2FtcGxlcyUyQyUyMG51bV9yb3dzKSUwQWdyaWQ=",highlighted:`all_images = []
<span class="hljs-keyword">for</span> _ <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_rows):
images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=<span class="hljs-number">50</span>, guidance_scale=<span class="hljs-number">7.5</span>).images
all_images.extend(images)
grid = image_grid(all_images, num_samples, num_rows)
grid`,wrap:!1}}),Z=new ve({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/ko/using-diffusers/textual_inversion_inference.md"}}),{c(){p=i("meta"),I=n(),x=i("p"),X=n(),r(b.$$.fragment),z=n(),r(g.$$.fragment),H=n(),w=i("p"),w.innerHTML=ie,F=n(),U=i("p"),U.innerHTML=oe,R=n(),h=i("p"),h.textContent=pe,E=n(),r(_.$$.fragment),N=n(),j=i("p"),j.innerHTML=re,Y=n(),r(T.$$.fragment),P=n(),v=i("p"),v.innerHTML=ce,L=n(),r(J.$$.fragment),V=n(),C=i("p"),C.textContent=me,D=n(),r(k.$$.fragment),K=n(),$=i("p"),$.innerHTML=ue,q=n(),r(W.$$.fragment),A=n(),B=i("p"),B.innerHTML=Me,O=n(),r(Q.$$.fragment),ee=n(),y=i("div"),y.innerHTML=fe,se=n(),r(Z.$$.fragment),te=n(),G=i("p"),this.h()},l(e){const s=he("svelte-u9bgzb",document.head);p=o(s,"META",{name:!0,content:!0}),s.forEach(t),I=a(e),x=o(e,"P",{}),de(x).forEach(t),X=a(e),c(b.$$.fragment,e),z=a(e),c(g.$$.fragment,e),H=a(e),w=o(e,"P",{"data-svelte-h":!0}),m(w)!=="svelte-nm7oku"&&(w.innerHTML=ie),F=a(e),U=o(e,"P",{"data-svelte-h":!0}),m(U)!=="svelte-6ecc9h"&&(U.innerHTML=oe),R=a(e),h=o(e,"P",{"data-svelte-h":!0}),m(h)!=="svelte-1nfmd9t"&&(h.textContent=pe),E=a(e),c(_.$$.fragment,e),N=a(e),j=o(e,"P",{"data-svelte-h":!0}),m(j)!=="svelte-1etucvv"&&(j.innerHTML=re),Y=a(e),c(T.$$.fragment,e),P=a(e),v=o(e,"P",{"data-svelte-h":!0}),m(v)!=="svelte-5ws1w4"&&(v.innerHTML=ce),L=a(e),c(J.$$.fragment,e),V=a(e),C=o(e,"P",{"data-svelte-h":!0}),m(C)!=="svelte-rgvcdq"&&(C.textContent=me),D=a(e),c(k.$$.fragment,e),K=a(e),$=o(e,"P",{"data-svelte-h":!0}),m($)!=="svelte-g8ujvg"&&($.innerHTML=ue),q=a(e),c(W.$$.fragment,e),A=a(e),B=o(e,"P",{"data-svelte-h":!0}),m(B)!=="svelte-1fnsviy"&&(B.innerHTML=Me),O=a(e),c(Q.$$.fragment,e),ee=a(e),y=o(e,"DIV",{class:!0,"data-svelte-h":!0}),m(y)!=="svelte-1r5zq0s"&&(y.innerHTML=fe),se=a(e),c(Z.$$.fragment,e),te=a(e),G=o(e,"P",{}),de(G).forEach(t),this.h()},h(){ne(p,"name","hf:doc:metadata"),ne(p,"content",Ce),ne(y,"class","flex justify-center")},m(e,s){_e(document.head,p),l(e,I,s),l(e,x,s),l(e,X,s),u(b,e,s),l(e,z,s),u(g,e,s),l(e,H,s),l(e,w,s),l(e,F,s),l(e,U,s),l(e,R,s),l(e,h,s),l(e,E,s),u(_,e,s),l(e,N,s),l(e,j,s),l(e,Y,s),u(T,e,s),l(e,P,s),l(e,v,s),l(e,L,s),u(J,e,s),l(e,V,s),l(e,C,s),l(e,D,s),u(k,e,s),l(e,K,s),l(e,$,s),l(e,q,s),u(W,e,s),l(e,A,s),l(e,B,s),l(e,O,s),u(Q,e,s),l(e,ee,s),l(e,y,s),l(e,se,s),u(Z,e,s),l(e,te,s),l(e,G,s),le=!0},p:be,i(e){le||(M(b.$$.fragment,e),M(g.$$.fragment,e),M(_.$$.fragment,e),M(T.$$.fragment,e),M(J.$$.fragment,e),M(k.$$.fragment,e),M(W.$$.fragment,e),M(Q.$$.fragment,e),M(Z.$$.fragment,e),le=!0)},o(e){f(b.$$.fragment,e),f(g.$$.fragment,e),f(_.$$.fragment,e),f(T.$$.fragment,e),f(J.$$.fragment,e),f(k.$$.fragment,e),f(W.$$.fragment,e),f(Q.$$.fragment,e),f(Z.$$.fragment,e),le=!1},d(e){e&&(t(I),t(x),t(X),t(z),t(H),t(w),t(F),t(U),t(R),t(h),t(E),t(N),t(j),t(Y),t(P),t(v),t(L),t(V),t(C),t(D),t(K),t($),t(q),t(A),t(B),t(O),t(ee),t(y),t(se),t(te),t(G)),t(p),d(b,e),d(g,e),d(_,e),d(T,e),d(J,e),d(k,e),d(W,e),d(Q,e),d(Z,e)}}}const Ce='{"title":"Textual inversion","local":"textual-inversion","sections":[],"depth":1}';function ke(ae){return ge(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Se extends we{constructor(p){super(),Ue(this,p,ke,Je,ye,{})}}export{Se as component};

Xet Storage Details

Size:
11.9 kB
·
Xet hash:
82e162aebe1632f92bb325dd45d44330fb95e083f027b89640c266fcf1171b38

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.