# OmniGen

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

[OmniGen: Unified Image Generation](https://huggingface.co/papers/2409.11340) from BAAI, by Shitao Xiao, Yueze Wang, Junjie Zhou, Huaying Yuan, Xingrun Xing, Ruiran Yan, Chaofan Li, Shuting Wang, Tiejun Huang, Zheng Liu.

The abstract from the paper is:

*The emergence of Large Language Models (LLMs) has unified language generation tasks and revolutionized human-machine interaction. However, in the realm of image generation, a unified model capable of handling various tasks within a single framework remains largely unexplored. In this work, we introduce OmniGen, a new diffusion model for unified image generation. OmniGen is characterized by the following features: 1) Unification: OmniGen not only demonstrates text-to-image generation capabilities but also inherently supports various downstream tasks, such as image editing, subject-driven generation, and visual conditional generation. 2) Simplicity: The architecture of OmniGen is highly simplified, eliminating the need for additional plugins. Moreover, compared to existing diffusion models, it is more user-friendly and can complete complex tasks end-to-end through instructions without the need for extra intermediate steps, greatly simplifying the image generation workflow. 3) Knowledge Transfer: Benefiting from learning in a unified format, OmniGen effectively transfers knowledge across different tasks, manages unseen tasks and domains, and exhibits novel capabilities. We also explore the model's reasoning capabilities and potential applications of the chain-of-thought mechanism. This work represents the first attempt at a general-purpose image generation model, and we will release our resources at https://github.com/VectorSpaceLab/OmniGen to foster future advancements.*

This pipeline was contributed by [staoxiao](https://github.com/staoxiao). The original codebase can be found [here](https://github.com/VectorSpaceLab/OmniGen). The original weights can be found under [hf.co/Shitao/OmniGen-v1](https://huggingface.co/Shitao/OmniGen-v1).
## Inference

First, load the pipeline:

```python
import torch
from diffusers import OmniGenPipeline

pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
pipe.to("cuda")
```
For text-to-image, pass a text prompt. By default, OmniGen generates a 1024x1024 image. You can set the `height` and `width` parameters to generate an image of a different size.

```python
prompt = "Realistic photo. A young woman sits on a sofa, holding a book and facing the camera. She wears delicate silver hoop earrings adorned with tiny, sparkling diamonds that catch the light, with her long chestnut hair cascading over her shoulders. Her eyes are focused and gentle, framed by long, dark lashes. She is dressed in a cozy cream sweater, which complements her warm, inviting smile. Behind her, there is a table with a cup of water in a sleek, minimalist blue mug. The background is a serene indoor setting with soft natural light filtering through a window, adorned with tasteful art and flowers, creating a cozy and peaceful ambiance. 4K, HD."
image = pipe(
    prompt=prompt,
    height=1024,
    width=1024,
    guidance_scale=3,
    generator=torch.Generator(device="cpu").manual_seed(111),
).images[0]
image.save("output.png")
```
OmniGen supports multimodal inputs. When the input includes an image, add a placeholder `<img><|image_1|></img>` to the text prompt to mark where the image belongs. It is recommended to enable `use_input_image_size_as_output` to keep the edited image the same size as the original image.

```python
from diffusers.utils import load_image

prompt = "<img><|image_1|></img> Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola."
input_images = [load_image("https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/imgs/docs_img/t2i_woman_with_book.png")]
image = pipe(
    prompt=prompt,
    input_images=input_images,
    guidance_scale=2,
    img_guidance_scale=1.6,
    use_input_image_size_as_output=True,
    generator=torch.Generator(device="cpu").manual_seed(222),
).images[0]
image.save("output.png")
```
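The same placeholder scheme extends to multiple input images: number the placeholders `<|image_1|>`, `<|image_2|>`, and so on, in the order the images appear in `input_images`. A minimal sketch, assuming two hypothetical local files `person_1.png` and `person_2.png` (file names and prompt are illustrative, not from this page):

```python
from diffusers.utils import load_image

# <|image_1|> and <|image_2|> are replaced by the first and second
# entries of input_images, in that order.
prompt = "A man in <img><|image_1|></img> waves to a woman in <img><|image_2|></img>."
input_images = [load_image("person_1.png"), load_image("person_2.png")]  # hypothetical paths
image = pipe(
    prompt=prompt,
    input_images=input_images,
    guidance_scale=2.5,
    img_guidance_scale=1.6,
).images[0]
image.save("multi_subject.png")
```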
## OmniGenPipeline

### class diffusers.OmniGenPipeline

The OmniGen pipeline for multimodal-to-image generation.

Reference: https://huggingface.co/papers/2409.11340

**Parameters:**

- **transformer** ([OmniGenTransformer2DModel](/docs/diffusers/pr_11743/en/api/models/omnigen_transformer#diffusers.OmniGenTransformer2DModel)) — Autoregressive Transformer architecture for OmniGen.
- **scheduler** ([FlowMatchEulerDiscreteScheduler](/docs/diffusers/pr_11743/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- **vae** ([AutoencoderKL](/docs/diffusers/pr_11743/en/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **tokenizer** (`LlamaTokenizer`) — Text tokenizer of class [LlamaTokenizer](https://huggingface.co/docs/transformers/main/model_doc/llama#transformers.LlamaTokenizer).
#### __call__

Function invoked when calling the pipeline for generation.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If the input includes images, add placeholders `<img><|image_i|></img>` in the prompt to indicate the position of the i-th image.
- **input_images** (`PipelineImageInput` or `List[PipelineImageInput]`, *optional*) — The list of input images. Each `<|image_i|>` placeholder in the prompt is replaced with the i-th image in the list.
- **height** (`int`, *optional*, defaults to 1024) — The height in pixels of the generated image. This is set to 1024 by default for the best results.
- **width** (`int`, *optional*, defaults to 1024) — The width in pixels of the generated image. This is set to 1024 by default for the best results.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **max_input_image_size** (`int`, *optional*, defaults to 1024) — The maximum size of an input image; larger input images are cropped to this size.
- **timesteps** (`List[int]`, *optional*) — Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order.
- **guidance_scale** (`float`, *optional*, defaults to 2.5) — Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
- **img_guidance_scale** (`float`, *optional*, defaults to 1.6) — Image guidance scale as defined in equation 3 of [InstructPix2Pix](https://huggingface.co/papers/2211.09800).
- **use_input_image_size_as_output** (`bool`, *optional*, defaults to `False`) — Whether to use the input image size as the output image size. Useful for single-image inputs, e.g., image editing tasks.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
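To see how `latents` can tweak the same generation, here is a minimal sketch; the latent shape is an assumption based on the usual 4-channel KL-VAE layout (`batch, 4, height // 8, width // 8`), not taken from the pipeline source:

```python
import torch

# Assumed latent layout for a 1024x1024 output; verify against your checkpoint.
latents = torch.randn(1, 4, 1024 // 8, 1024 // 8, dtype=torch.bfloat16, device="cuda")

# Same starting noise, different prompts: useful for controlled comparisons.
image_a = pipe("A cat holding a sign that says hello world", latents=latents.clone()).images[0]
image_b = pipe("A dog holding a sign that says hello world", latents=latents.clone()).images[0]
```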
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return an [ImagePipelineOutput](/docs/diffusers/pr_11743/en/api/pipelines/unclip#diffusers.ImagePipelineOutput) instead of a plain tuple.
- **callback_on_step_end** (`Callable`, *optional*) — A function called at the end of each denoising step during inference, with the signature `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.

**Returns:** [ImagePipelineOutput](/docs/diffusers/pr_11743/en/api/pipelines/unclip#diffusers.ImagePipelineOutput) or `tuple` — If `return_dict` is `True`, an [ImagePipelineOutput](/docs/diffusers/pr_11743/en/api/pipelines/unclip#diffusers.ImagePipelineOutput) is returned, otherwise a `tuple` is returned where the first element is a list with the generated images.

Example:

```python
>>> import torch
>>> from diffusers import OmniGenPipeline

>>> pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> prompt = "A cat holding a sign that says hello world"
>>> # Depending on the variant being used, the pipeline call will slightly vary.
>>> # Refer to the pipeline documentation for more details.
>>> image = pipe(prompt, num_inference_steps=50, guidance_scale=2.5).images[0]
>>> image.save("output.png")
```
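As an illustration of the callback hook, the sketch below logs latent statistics at each step. The `log_step` helper is hypothetical, and it assumes the common diffusers convention that the callback returns a dict whose tensors are fed back into the denoising loop:

```python
def log_step(pipeline, step, timestep, callback_kwargs):
    # "latents" is available here because it is listed in
    # callback_on_step_end_tensor_inputs below.
    latents = callback_kwargs["latents"]
    print(f"step {step}: timestep {timestep}, latent std {latents.std().item():.4f}")
    return callback_kwargs

image = pipe(
    prompt="A cat holding a sign that says hello world",
    callback_on_step_end=log_step,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```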
t=Tt("svelte-u9bgzb",document.head);p=r(t,"META",{name:!0,content:!0}),t.forEach(n),j=a(e),y=r(e,"P",{}),$(y).forEach(n),w=a(e),f(T.$$.fragment,e),m=a(e),G=r(e,"P",{"data-svelte-h":!0}),u(G)!=="svelte-18kttfi"&&(G.innerHTML=et),fe=a(e),P=r(e,"P",{"data-svelte-h":!0}),u(P)!=="svelte-1cwsb16"&&(P.textContent=tt),he=a(e),V=r(e,"P",{"data-svelte-h":!0}),u(V)!=="svelte-o0h1ee"&&(V.innerHTML=nt),_e=a(e),f(x.$$.fragment,e),be=a(e),O=r(e,"P",{"data-svelte-h":!0}),u(O)!=="svelte-1yp0gqw"&&(O.innerHTML=it),Me=a(e),f(L.$$.fragment,e),ye=a(e),E=r(e,"P",{"data-svelte-h":!0}),u(E)!=="svelte-jub7f1"&&(E.textContent=at),we=a(e),f(z.$$.fragment,e),Te=a(e),Y=r(e,"P",{"data-svelte-h":!0}),u(Y)!=="svelte-1a87kfl"&&(Y.innerHTML=st),ve=a(e),f(H.$$.fragment,e),Ge=a(e),R=r(e,"P",{"data-svelte-h":!0}),u(R)!=="svelte-53bqqd"&&(R.innerHTML=lt),Je=a(e),f(X.$$.fragment,e),je=a(e),f(S.$$.fragment,e),Ie=a(e),d=r(e,"DIV",{class:!0});var c=$(d);f(A.$$.fragment,c),Pe=a(c),ie=r(c,"P",{"data-svelte-h":!0}),u(ie)!=="svelte-mbik9p"&&(ie.textContent=ot),Ve=a(c),ae=r(c,"P",{"data-svelte-h":!0}),u(ae)!=="svelte-1x5trn6"&&(ae.innerHTML=rt),Oe=a(c),v=r(c,"DIV",{class:!0});var J=$(v);f(F.$$.fragment,J),Le=a(J),se=r(J,"P",{"data-svelte-h":!0}),u(se)!=="svelte-v78lg8"&&(se.textContent=pt),Ee=a(J),f(k.$$.fragment,J),ze=a(J),le=r(J,"P",{"data-svelte-h":!0}),u(le)!=="svelte-ruq61m"&&(le.innerHTML=dt),J.forEach(n),Ye=a(c),Z=r(c,"DIV",{class:!0});var ke=$(Z);f(N.$$.fragment,ke),He=a(ke),oe=r(ke,"P",{"data-svelte-h":!0}),u(oe)!=="svelte-1s3c06i"&&(oe.innerHTML=ct),ke.forEach(n),Re=a(c),W=r(c,"DIV",{class:!0});var Ze=$(W);f(Q.$$.fragment,Ze),Xe=a(Ze),re=r(Ze,"P",{"data-svelte-h":!0}),u(re)!=="svelte-pkn4ui"&&(re.innerHTML=mt),Ze.forEach(n),Se=a(c),B=r(c,"DIV",{class:!0});var We=$(B);f(D.$$.fragment,We),Ae=a(We),pe=r(We,"P",{"data-svelte-h":!0}),u(pe)!=="svelte-14bnrb6"&&(pe.textContent=ut),We.forEach(n),Fe=a(c),C=r(c,"DIV",{class:!0});var Be=$(C);f(q.$$.fragment,Be),Ne=a(Be),de=r(Be,"P",{"data-svelte-h":!0}),u(de)!=="svelte-1xwrf7t"&&(de.textContent=gt),Be.forEach(n),Qe=a(c),I=r(c,"DIV",{class:!0});var ue=$(I);f(K.$$.fragment,ue),De=a(ue),ce=r(ue,"P",{"data-svelte-h":!0}),u(ce)!=="svelte-skn529"&&(ce.textContent=ft),qe=a(ue),me=r(ue,"P",{"data-svelte-h":!0}),u(me)!=="svelte-1c3z8g4"&&(me.textContent=ht),ue.forEach(n),c.forEach(n),$e=a(e),f(ee.$$.fragment,e),Ue=a(e),ge=r(e,"P",{}),$(ge).forEach(n),this.h()},h(){U(p,"name","hf:doc:metadata"),U(p,"content",Ut),U(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(d,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,t){s(document.head,p),l(e,j,t),l(e,y,t),l(e,w,t),h(T,e,t),l(e,m,t),l(e,G,t),l(e,fe,t),l(e,P,t),l(e,he,t),l(e,V,t),l(e,_e,t),h(x,e,t),l(e,be,t),l(e,O,t),l(e,Me,t),h(L,e,t),l(e,ye,t),l(e,E,t),l(e,we,t),h(z,e,t),l(e,Te,t),l(e,Y,t),l(e,ve,t),h(H,e,t),l(e,Ge,t),l(e,R,t),l(e,Je,t),h(X,e,t),l(e,je,t),h(S,e,t),l(e,Ie,t),l(e,d,t),h(A,d,null),s(d,Pe),s(d,ie),s(d,Ve),s(d,ae),s(d,Oe),s(d,v),h(F,v,null),s(v,Le),s(v,se),s(v,Ee),h(k,v,null),s(v,ze),s(v,le),s(d,Ye),s(d,Z),h(N,Z,null),s(Z,He),s(Z,oe),s(d,Re),s(d,W),h(Q,W,null),s(W,Xe),s(W,re),s(d,Se),s(d,B),h(D,B,null),s(B,Ae),s(B,pe),s(d,Fe),s(d,C),h(q,C,null),s(C,Ne),s(C,de),s(d,Qe),s(d,I),h(K,I,null),s(I,De),s(I,ce),s(I,qe),s(I,me),l(e,$e,t),h(ee,e,t),l(e,Ue,t),l(e,ge,t),xe=!0},p(e,[t]){const c={};t&2&&(c.$$scope={dirty:t,ctx:e}),x.$set(c);const J={};t&2&&(J.$$scope={dirty:t,ctx:e}),k.$set(J)},i(e){xe||(_(T.$$.fragment,e),_(x.$$.fragment,e),_(L.$$.fragment,e),_(z.$$.fragment,e),_(H.$$.fragment,e),_(X.$$.fragment,e),_(S.$$.fragment,e),_(A.$$.fragment,e),_(F.$$.fragment,e),_(k.$$.fragment,e),_(N.$$.fragment,e),_(Q.$$.fragment,e),_(D.$$.fragment,e),_(q.$$.fragment,e),_(K.$$.fragment,e),_(ee.$$.fragment,e),xe=!0)},o(e){b(T.$$.fragment,e),b(x.$$.fragment,e),b(L.$$.fragment,e),b(z.$$.fragment,e),b(H.$$.fragment,e),b(X.$$.fragment,e),b(S.$$.fragment,e),b(A.$$.fragment,e),b(F.$$.fragment,e),b(k.$$.fragment,e),b(N.$$.fragment,e),b(Q.$$.fragment,e),b(D.$$.fragment,e),b(q.$$.fragment,e),b(K.$$.fragment,e),b(ee.$$.fragment,e),xe=!1},d(e){e&&(n(j),n(y),n(w),n(m),n(G),n(fe),n(P),n(he),n(V),n(_e),n(be),n(O),n(Me),n(ye),n(E),n(we),n(Te),n(Y),n(ve),n(Ge),n(R),n(Je),n(je),n(Ie),n(d),n($e),n(Ue),n(ge)),n(p),M(T,e),M(x,e),M(L,e),M(z,e),M(H,e),M(X,e),M(S,e),M(A),M(F),M(k),M(N),M(Q),M(D),M(q),M(K),M(ee,e)}}}const Ut='{"title":"OmniGen","local":"omnigen","sections":[{"title":"Inference","local":"inference","sections":[],"depth":2},{"title":"OmniGenPipeline","local":"diffusers.OmniGenPipeline","sections":[],"depth":2}],"depth":1}';function xt(ne){return Mt(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ot extends yt{constructor(p){super(),wt(this,p,xt,$t,bt,{})}}export{Ot as component}; | |