Buckets:

rtrm's picture
download
raw
12.7 kB
/*
 * GENERATED FILE — compiled, minified Svelte page module for the Hugging Face
 * `diffusers` documentation page "Dance Diffusion" (built from
 * docs/source/en/api/pipelines/dance_diffusion.md, docs build `pr_11452`).
 * Do not edit by hand; regenerate from the markdown source instead.
 *
 * Structure (inferred from the visible code only):
 * - Me(W): create/hydrate fragment for the Tip paragraph linking to the
 *   schedulers guide and the "reuse components across pipelines" section.
 * - Pe(W): fragment for the "Example:" label plus a CodeBlock (`ve`) whose
 *   `code` prop is a URL-/base64-encoded Python snippet (DiffusionPipeline
 *   audio generation) and whose `highlighted` prop is the same snippet as
 *   pre-highlighted HTML in a multi-line template literal.
 * - xe(W): main page fragment — H1/H2 headings (`le`), Tip (`ye`), Docstring
 *   blocks (`re`) for `DanceDiffusionPipeline`, its `__call__`, and
 *   `AudioPipelineOutput`, an ExampleCodeBlock (`Te`), and an edit-on-GitHub
 *   footer (`De`); writes a `hf:doc:metadata` <meta> tag into document.head.
 * - Ge: JSON page-metadata string (title/sections tree) used for that tag.
 * - Je(W): instance init — on mount (`ge`) reads the `fw` query parameter.
 * - We: SvelteComponent subclass wiring Je/xe together; exported as
 *   `component`.
 *
 * NOTE(review): the physical line breaks below fall inside template literals
 * (highlighted-code HTML, docstring HTML), so no comments can be inserted
 * mid-module without corrupting runtime strings. The `data-svelte-h` hash
 * checks (e.g. "svelte-1qn15hi") also bind hydration to these exact bytes —
 * keep everything from the first `import` to the final `export` byte-identical.
 */
import{s as _e,o as ge,n as he}from"../chunks/scheduler.8c3d61f6.js";import{S as $e,i as be,g as d,s as o,r as b,A as we,h as u,f as n,c as r,j as X,u as w,x as J,k as N,y as m,a as i,v as y,d as v,t as T,w as D}from"../chunks/index.da70eac4.js";import{T as ye}from"../chunks/Tip.1d9b8c37.js";import{D as re}from"../chunks/Docstring.0b9cc58b.js";import{C as ve}from"../chunks/CodeBlock.a9c4becf.js";import{E as Te}from"../chunks/ExampleCodeBlock.ba0ba69d.js";import{H as le,E as De}from"../chunks/index.a831177d.js";function Me(W){let s,g='Make sure to check out the Schedulers <a href="../../using-diffusers/schedulers">guide</a> to learn how to explore the tradeoff between scheduler speed and quality, and see the <a href="../../using-diffusers/loading#reuse-a-pipeline">reuse components across pipelines</a> section to learn how to efficiently load the same components into multiple pipelines.';return{c(){s=d("p"),s.innerHTML=g},l(l){s=u(l,"P",{"data-svelte-h":!0}),J(s)!=="svelte-1qn15hi"&&(s.innerHTML=g)},m(l,p){i(l,s,p)},p:he,d(l){l&&n(s)}}}function Pe(W){let s,g="Example:",l,p,c;return p=new 
ve({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBZnJvbSUyMHNjaXB5LmlvLndhdmZpbGUlMjBpbXBvcnQlMjB3cml0ZSUwQSUwQW1vZGVsX2lkJTIwJTNEJTIwJTIyaGFybW9uYWklMkZtYWVzdHJvLTE1MGslMjIlMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKG1vZGVsX2lkKSUwQXBpcGUlMjAlM0QlMjBwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBYXVkaW9zJTIwJTNEJTIwcGlwZShhdWRpb19sZW5ndGhfaW5fcyUzRDQuMCkuYXVkaW9zJTBBJTBBJTIzJTIwVG8lMjBzYXZlJTIwbG9jYWxseSUwQWZvciUyMGklMkMlMjBhdWRpbyUyMGluJTIwZW51bWVyYXRlKGF1ZGlvcyklM0ElMEElMjAlMjAlMjAlMjB3cml0ZShmJTIybWFlc3Ryb190ZXN0XyU3QmklN0Qud2F2JTIyJTJDJTIwcGlwZS51bmV0LnNhbXBsZV9yYXRlJTJDJTIwYXVkaW8udHJhbnNwb3NlKCkpJTBBJTBBJTIzJTIwVG8lMjBkaXNwbGF5JTIwaW4lMjBnb29nbGUlMjBjb2xhYiUwQWltcG9ydCUyMElQeXRob24uZGlzcGxheSUyMGFzJTIwaXBkJTBBJTBBZm9yJTIwYXVkaW8lMjBpbiUyMGF1ZGlvcyUzQSUwQSUyMCUyMCUyMCUyMGRpc3BsYXkoaXBkLkF1ZGlvKGF1ZGlvJTJDJTIwcmF0ZSUzRHBpcGUudW5ldC5zYW1wbGVfcmF0ZSkp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline
<span class="hljs-keyword">from</span> scipy.io.wavfile <span class="hljs-keyword">import</span> write
model_id = <span class="hljs-string">&quot;harmonai/maestro-150k&quot;</span>
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
audios = pipe(audio_length_in_s=<span class="hljs-number">4.0</span>).audios
<span class="hljs-comment"># To save locally</span>
<span class="hljs-keyword">for</span> i, audio <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(audios):
write(<span class="hljs-string">f&quot;maestro_test_<span class="hljs-subst">{i}</span>.wav&quot;</span>, pipe.unet.sample_rate, audio.transpose())
<span class="hljs-comment"># To display in google colab</span>
<span class="hljs-keyword">import</span> IPython.display <span class="hljs-keyword">as</span> ipd
<span class="hljs-keyword">for</span> audio <span class="hljs-keyword">in</span> audios:
display(ipd.Audio(audio, rate=pipe.unet.sample_rate))`,wrap:!1}}),{c(){s=d("p"),s.textContent=g,l=o(),b(p.$$.fragment)},l(a){s=u(a,"P",{"data-svelte-h":!0}),J(s)!=="svelte-11lpom8"&&(s.textContent=g),l=r(a),w(p.$$.fragment,a)},m(a,h){i(a,s,h),i(a,l,h),y(p,a,h),c=!0},p:he,i(a){c||(v(p.$$.fragment,a),c=!0)},o(a){T(p.$$.fragment,a),c=!1},d(a){a&&(n(s),n(l)),D(p,a)}}}function xe(W){let s,g,l,p,c,a,h,pe='<a href="https://github.com/Harmonai-org/sample-generator" rel="nofollow">Dance Diffusion</a> is by Zach Evans.',L,k,de='Dance Diffusion is the first in a suite of generative audio tools for producers and musicians released by <a href="https://github.com/Harmonai-org" rel="nofollow">Harmonai</a>.',H,x,O,B,R,f,j,te,E,ue="Pipeline for audio generation.",ne,z,fe=`This model inherits from <a href="/docs/diffusers/pr_11452/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,se,$,U,ie,A,ce="The call function to the pipeline for generation.",ae,G,F,C,Q,M,Z,oe,S,me="Output class for audio pipelines.",Y,I,q,V,K;return c=new le({props:{title:"Dance Diffusion",local:"dance-diffusion",headingTag:"h1"}}),x=new ye({props:{$$slots:{default:[Me]},$$scope:{ctx:W}}}),B=new le({props:{title:"DanceDiffusionPipeline",local:"diffusers.DanceDiffusionPipeline",headingTag:"h2"}}),j=new re({props:{name:"class diffusers.DanceDiffusionPipeline",anchor:"diffusers.DanceDiffusionPipeline",parameters:[{name:"unet",val:": UNet1DModel"},{name:"scheduler",val:": SchedulerMixin"}],parametersDescription:[{anchor:"diffusers.DanceDiffusionPipeline.unet",description:`<strong>unet</strong> (<a href="/docs/diffusers/pr_11452/en/api/models/unet#diffusers.UNet1DModel">UNet1DModel</a>) &#x2014;
A <code>UNet1DModel</code> to denoise the encoded audio.`,name:"unet"},{anchor:"diffusers.DanceDiffusionPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11452/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) &#x2014;
A scheduler to be used in combination with <code>unet</code> to denoise the encoded audio latents. Can be one of
<a href="/docs/diffusers/pr_11452/en/api/schedulers/ipndm#diffusers.IPNDMScheduler">IPNDMScheduler</a>.`,name:"scheduler"}],source:"https://github.com/huggingface/diffusers/blob/vr_11452/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py#L37"}}),U=new re({props:{name:"__call__",anchor:"diffusers.DanceDiffusionPipeline.__call__",parameters:[{name:"batch_size",val:": int = 1"},{name:"num_inference_steps",val:": int = 100"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"audio_length_in_s",val:": typing.Optional[float] = None"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.DanceDiffusionPipeline.__call__.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of audio samples to generate.`,name:"batch_size"},{anchor:"diffusers.DanceDiffusionPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
the expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.DanceDiffusionPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) &#x2014;
A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make
generation deterministic.`,name:"generator"},{anchor:"diffusers.DanceDiffusionPipeline.__call__.audio_length_in_s",description:`<strong>audio_length_in_s</strong> (<code>float</code>, <em>optional</em>, defaults to <code>self.unet.config.sample_size/self.unet.config.sample_rate</code>) &#x2014;
The length of the generated audio sample in seconds.`,name:"audio_length_in_s"},{anchor:"diffusers.DanceDiffusionPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <a href="/docs/diffusers/pr_11452/en/api/pipelines/audioldm#diffusers.AudioPipelineOutput">AudioPipelineOutput</a> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_11452/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py#L58",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If <code>return_dict</code> is <code>True</code>, <a
href="/docs/diffusers/pr_11452/en/api/pipelines/audioldm#diffusers.AudioPipelineOutput"
>AudioPipelineOutput</a> is returned, otherwise a <code>tuple</code> is
returned where the first element is a list with the generated audio.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><a
href="/docs/diffusers/pr_11452/en/api/pipelines/audioldm#diffusers.AudioPipelineOutput"
>AudioPipelineOutput</a> or <code>tuple</code></p>
`}}),G=new Te({props:{anchor:"diffusers.DanceDiffusionPipeline.__call__.example",$$slots:{default:[Pe]},$$scope:{ctx:W}}}),C=new le({props:{title:"AudioPipelineOutput",local:"diffusers.AudioPipelineOutput",headingTag:"h2"}}),Z=new re({props:{name:"class diffusers.AudioPipelineOutput",anchor:"diffusers.AudioPipelineOutput",parameters:[{name:"audios",val:": ndarray"}],parametersDescription:[{anchor:"diffusers.AudioPipelineOutput.audios",description:`<strong>audios</strong> (<code>np.ndarray</code>) &#x2014;
List of denoised audio samples of a NumPy array of shape <code>(batch_size, num_channels, sample_rate)</code>.`,name:"audios"}],source:"https://github.com/huggingface/diffusers/blob/vr_11452/src/diffusers/pipelines/pipeline_utils.py#L128"}}),I=new De({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/dance_diffusion.md"}}),{c(){s=d("meta"),g=o(),l=d("p"),p=o(),b(c.$$.fragment),a=o(),h=d("p"),h.innerHTML=pe,L=o(),k=d("p"),k.innerHTML=de,H=o(),b(x.$$.fragment),O=o(),b(B.$$.fragment),R=o(),f=d("div"),b(j.$$.fragment),te=o(),E=d("p"),E.textContent=ue,ne=o(),z=d("p"),z.innerHTML=fe,se=o(),$=d("div"),b(U.$$.fragment),ie=o(),A=d("p"),A.textContent=ce,ae=o(),b(G.$$.fragment),F=o(),b(C.$$.fragment),Q=o(),M=d("div"),b(Z.$$.fragment),oe=o(),S=d("p"),S.textContent=me,Y=o(),b(I.$$.fragment),q=o(),V=d("p"),this.h()},l(e){const t=we("svelte-u9bgzb",document.head);s=u(t,"META",{name:!0,content:!0}),t.forEach(n),g=r(e),l=u(e,"P",{}),X(l).forEach(n),p=r(e),w(c.$$.fragment,e),a=r(e),h=u(e,"P",{"data-svelte-h":!0}),J(h)!=="svelte-z4ffbo"&&(h.innerHTML=pe),L=r(e),k=u(e,"P",{"data-svelte-h":!0}),J(k)!=="svelte-dzyais"&&(k.innerHTML=de),H=r(e),w(x.$$.fragment,e),O=r(e),w(B.$$.fragment,e),R=r(e),f=u(e,"DIV",{class:!0});var _=X(f);w(j.$$.fragment,_),te=r(_),E=u(_,"P",{"data-svelte-h":!0}),J(E)!=="svelte-1jvczvp"&&(E.textContent=ue),ne=r(_),z=u(_,"P",{"data-svelte-h":!0}),J(z)!=="svelte-12ax5of"&&(z.innerHTML=fe),se=r(_),$=u(_,"DIV",{class:!0});var P=X($);w(U.$$.fragment,P),ie=r(P),A=u(P,"P",{"data-svelte-h":!0}),J(A)!=="svelte-50j04k"&&(A.textContent=ce),ae=r(P),w(G.$$.fragment,P),P.forEach(n),_.forEach(n),F=r(e),w(C.$$.fragment,e),Q=r(e),M=u(e,"DIV",{class:!0});var ee=X(M);w(Z.$$.fragment,ee),oe=r(ee),S=u(ee,"P",{"data-svelte-h":!0}),J(S)!=="svelte-19ryw33"&&(S.textContent=me),ee.forEach(n),Y=r(e),w(I.$$.fragment,e),q=r(e),V=u(e,"P",{}),X(V).forEach(n),this.h()},h(){N(s,"name","hf:doc:metadata"),N(s,"content",Ge),N($,"class","docstring 
border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),N(f,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),N(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){m(document.head,s),i(e,g,t),i(e,l,t),i(e,p,t),y(c,e,t),i(e,a,t),i(e,h,t),i(e,L,t),i(e,k,t),i(e,H,t),y(x,e,t),i(e,O,t),y(B,e,t),i(e,R,t),i(e,f,t),y(j,f,null),m(f,te),m(f,E),m(f,ne),m(f,z),m(f,se),m(f,$),y(U,$,null),m($,ie),m($,A),m($,ae),y(G,$,null),i(e,F,t),y(C,e,t),i(e,Q,t),i(e,M,t),y(Z,M,null),m(M,oe),m(M,S),i(e,Y,t),y(I,e,t),i(e,q,t),i(e,V,t),K=!0},p(e,[t]){const _={};t&2&&(_.$$scope={dirty:t,ctx:e}),x.$set(_);const P={};t&2&&(P.$$scope={dirty:t,ctx:e}),G.$set(P)},i(e){K||(v(c.$$.fragment,e),v(x.$$.fragment,e),v(B.$$.fragment,e),v(j.$$.fragment,e),v(U.$$.fragment,e),v(G.$$.fragment,e),v(C.$$.fragment,e),v(Z.$$.fragment,e),v(I.$$.fragment,e),K=!0)},o(e){T(c.$$.fragment,e),T(x.$$.fragment,e),T(B.$$.fragment,e),T(j.$$.fragment,e),T(U.$$.fragment,e),T(G.$$.fragment,e),T(C.$$.fragment,e),T(Z.$$.fragment,e),T(I.$$.fragment,e),K=!1},d(e){e&&(n(g),n(l),n(p),n(a),n(h),n(L),n(k),n(H),n(O),n(R),n(f),n(F),n(Q),n(M),n(Y),n(q),n(V)),n(s),D(c,e),D(x,e),D(B,e),D(j),D(U),D(G),D(C,e),D(Z),D(I,e)}}}const Ge='{"title":"Dance Diffusion","local":"dance-diffusion","sections":[{"title":"DanceDiffusionPipeline","local":"diffusers.DanceDiffusionPipeline","sections":[],"depth":2},{"title":"AudioPipelineOutput","local":"diffusers.AudioPipelineOutput","sections":[],"depth":2}],"depth":1}';function Je(W){return ge(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class We extends $e{constructor(s){super(),be(this,s,Je,xe,_e,{})}}export{We as component};
/* End of generated module. */

Xet Storage Details

Size:
12.7 kB
·
Xet hash:
74ed677be0ba4ebc9d4f880fce4c27ba5112e286d8b666070e8a260599198229

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.