# Text-to-image

<Tip warning={true}>

The text-to-image fine-tuning script is experimental. It is easy to overfit and run into issues like catastrophic forgetting. We recommend exploring different hyperparameters to get the best results on your own dataset.

</Tip>

Text-to-image models like Stable Diffusion generate an image from a text prompt. This guide shows how to fine-tune the [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) model on your own dataset with PyTorch and Flax. All the training scripts for text-to-image fine-tuning used in this guide can be found in this [repository](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) if you want to take a closer look.

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install git+https://github.com/huggingface/diffusers.git
pip install -U -r requirements.txt
```

And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

If you have already cloned the repository, you don't need to go through these steps. Instead, you can pass the path to your local checkout to the training script, and it will be loaded from there.

### Hardware requirements

Using `gradient_checkpointing` and `mixed_precision`, it should be possible to fine-tune the model on a single 24GB GPU. For a higher `batch_size` and faster training, it's better to use a GPU with more than 30GB of memory. You can also use JAX/Flax for fine-tuning on TPUs or GPUs; see the Flax instructions under [Fine-tuning](#fine-tuning) below for details.

You can reduce your memory footprint even further by enabling memory-efficient attention with xFormers. Make sure you have [xFormers installed](./optimization/xformers) and pass the `--enable_xformers_memory_efficient_attention` flag to the training script.

xFormers is not available for Flax.

## Upload the model to the Hub

Store your model on the Hub by adding the following argument to the training script:

```bash
  --push_to_hub
```

## Saving and loading checkpoints

It is a good idea to regularly save checkpoints in case anything happens during training. To save a checkpoint, pass the following argument to the training script:

```bash
  --checkpointing_steps=500
```

Every 500 steps, the full training state is saved in a subfolder of `output_dir`. Each checkpoint is named `checkpoint-` followed by the number of steps trained so far. For example, `checkpoint-1500` is a checkpoint saved after 1500 training steps.

To load a checkpoint and resume training, pass the `--resume_from_checkpoint` argument to the training script and specify the checkpoint to resume from. For example, the following argument resumes training from the checkpoint saved after 1500 training steps:

```bash
  --resume_from_checkpoint="checkpoint-1500"
```

## Fine-tuning

**PyTorch**

Launch the [PyTorch training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) for a fine-tuning run on the [Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) dataset like this:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export dataset_name="lambdalabs/naruto-blip-captions"

accelerate launch train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir="sd-naruto-model"
```

To fine-tune on your own dataset, prepare it in the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
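For instance, a local `ImageFolder` dataset is just a directory of images plus a `metadata.jsonl` file mapping each `file_name` to its caption. A minimal sketch of loading one with 🤗 Datasets follows; the folder layout and the caption column name `text` are illustrative assumptions (we believe `text` is the training script's default `--caption_column`, but check the script's arguments for your version):

```python
from datasets import load_dataset

# Illustrative layout (hypothetical paths):
# path_to_your_dataset/
# ├── metadata.jsonl   # one JSON object per line, e.g. {"file_name": "0001.png", "text": "a caption"}
# ├── 0001.png
# └── 0002.png

# ImageFolder pairs each decoded image with the extra columns from metadata.jsonl
dataset = load_dataset("imagefolder", data_dir="path_to_your_dataset")
print(dataset["train"][0]["text"])
```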
Modify the script if you want to use custom loading logic; we left pointers in the code at the appropriate places to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR` and where to save the model with `OUTPUT_DIR`:

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export TRAIN_DIR="path_to_your_dataset"
export OUTPUT_DIR="path_to_save_model"

accelerate launch train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir=${OUTPUT_DIR}
```
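For reference, the effective batch size with these settings is `train_batch_size` × `gradient_accumulation_steps` × the number of processes: gradients from 4 micro-batches of size 1 are accumulated before each optimizer update, giving an effective batch size of 4 per GPU.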
**Flax/JAX**

With [@duongna21](https://github.com/duongna21)'s contribution, you can train Stable Diffusion even faster on TPUs and GPUs using Flax. It is very efficient on TPU hardware but also works great on GPUs. The Flax training script doesn't yet support features like gradient checkpointing or gradient accumulation, so you'll need a GPU with at least 30GB of memory or a TPU v3.

Before running the script, make sure you have the requirements installed:

```bash
pip install -U -r requirements_flax.txt
```

Then you can launch the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py) like this:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export dataset_name="lambdalabs/naruto-blip-captions"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model"
```
As with PyTorch, you can fine-tune on your own dataset by preparing it in the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index), either [uploaded to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub) or as a [local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Modify the script if you want to use custom loading logic; we left pointers in the code at the appropriate places to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR`:

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export TRAIN_DIR="path_to_your_dataset"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-naruto-model"
```

## LoRA

You can also use Low-Rank Adaptation of Large Language Models (LoRA), a fine-tuning technique that accelerates the training of large models, to fine-tune text-to-image models. For details, take a look at the [LoRA training](lora#text-to-image) guide.

## Inference

Now you can load the fine-tuned model for inference by passing the model path, or the model name on the Hub, to the `StableDiffusionPipeline`:

**PyTorch**
```python
import torch
from diffusers import StableDiffusionPipeline

model_path = "path_to_saved_model"
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-naruto.png")
```
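If you saved intermediate checkpoints with `--checkpointing_steps`, you can also run inference from one of them. A minimal sketch, assuming the checkpoint subfolder contains the saved `unet` weights (the checkpoint path here is illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline, UNet2DConditionModel

# Load the UNet from an intermediate checkpoint
# (assumes the training script saved a `unet` subfolder there)
unet = UNet2DConditionModel.from_pretrained(
    "sd-naruto-model/checkpoint-1500/unet", torch_dtype=torch.float16
)

# Rebuild the pipeline around the checkpointed UNet,
# keeping the base model's other components
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", unet=unet, torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-naruto-checkpoint.png")
```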
**Flax**

```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path_to_saved_model"
pipe, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "yoda naruto"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipe.prepare_inputs(prompt)

# shard inputs and rng across devices
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipe.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("yoda-naruto.png")
```