dagloop5 committed on
Commit 531c4d0 · verified · 1 Parent(s): d5c5908

Delete app(theonethatreallyworks).py

Files changed (1)
  1. app(theonethatreallyworks).py +0 -723
app(theonethatreallyworks).py DELETED
@@ -1,723 +0,0 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", "--depth", "1", LTX_REPO_URL, LTX_REPO_DIR], check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))

import logging
import random
import tempfile
from pathlib import Path
import gc

import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod
print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")

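# Minimal sanity check for the patch (a sketch, assuming xformers imported
# successfully and a CUDA device is available; not part of the original flow).
# xformers expects (batch, seq_len, heads, head_dim) tensors:
#     q = torch.randn(1, 64, 8, 64, device="cuda", dtype=torch.float16)
#     assert _mea(q, q, q).shape == q.shape
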
logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}
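# Example lookup: RESOLUTIONS["high"]["9:16"] -> (1024, 1536), i.e. width=1024,
# height=1536 for a portrait video; the "low" tier uses smaller presets.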


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        # Standard path when no audio input is provided.
        print(prompt)
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        (ctx_p,) = encode_prompts(
            [prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
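        # Worked example (hypothetical sizes): if the VAE yields 70 latent frames
        # but AudioLatentShape expects 75 for this duration, 5 zero-frames are
        # appended along dim=2 (the frame axis); if it yields 80, the extra 5 are trimmed.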

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=video_context,
                    audio_context=audio_context,
                    transformer=transformer,
                ),
            )

        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
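        # Stage 1 denoised at half resolution (width // 2, height // 2); the x2
        # spatial upsampler doubles the latent so stage 2 can refine at the full
        # requested size below.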
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio


# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "rahul7star/gemma-3-12b-it-heretic"


# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)

# LoRA repo + download the requested LoRA adapters
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading LoRA adapters from dagloop5/LoRA...")
print("=" * 80)
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="pose_enhancer.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="general_enhancer.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")

print(f"Pose LoRA: {pose_lora_path}")
print(f"General LoRA: {general_lora_path}")
print(f"Motion LoRA: {motion_lora_path}")

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"Gemma root: {gemma_root}")

# Initialize the pipeline with text encoder and optional audio support.
# LoRAs are applied later, per request, by apply_loras_to_pipeline().
pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[],
    quantization=QuantizationPolicy.fp8_cast(),  # FP8 quantization
)

def apply_loras_to_pipeline(pose_strength: float, general_strength: float, motion_strength: float):
    """
    Apply LoRAs by:
    1) creating a temporary ledger with the requested LoRAs,
    2) building the fused transformer on CPU only,
    3) copying parameters & buffers in-place into the existing GPU transformer,
    4) freeing CPU objects and clearing cache.
    This avoids having two full transformers on GPU simultaneously.
    """
    ledger = pipeline.model_ledger

    entries = [
        (pose_lora_path, float(pose_strength)),
        (general_lora_path, float(general_strength)),
        (motion_lora_path, float(motion_strength)),
    ]

    # Build LoraPathStrengthAndSDOps for non-zero strengths
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for path, strength in entries
        if path is not None and float(strength) != 0.0
    ]

    if len(loras_for_builder) == 0:
        print("[LoRA] No nonzero LoRA strengths — skipping rebuild.")
        return

    try:
        # Create a temporary ledger configured with LoRAs
        tmp_ledger = ledger.with_loras(tuple(loras_for_builder))
        print(f"[LoRA] Built temporary ledger with {len(loras_for_builder)} LoRA(s).")

        # Force the temporary ledger to build on CPU so the fused model is built on CPU.
        # Save original attributes to restore them later.
        orig_tmp_target = getattr(tmp_ledger, "_target_device", None)
        orig_tmp_device = getattr(tmp_ledger, "device", None)
        try:
            # _target_device is expected to be callable by model_ledger.transformer();
            # set it to a callable that returns CPU so builder.build(device=...) works.
            tmp_ledger._target_device = (lambda: torch.device("cpu"))
            # ledger.device is used after build: set it to CPU so .to(self.device) keeps the model on CPU.
            tmp_ledger.device = torch.device("cpu")
            print("[LoRA] Building fused transformer on CPU (no GPU allocation)...")
            new_transformer_cpu = tmp_ledger.transformer()  # should now return a CPU model
            print("[LoRA] Fused transformer built on CPU.")
        finally:
            # Restore attributes to their previous values (if there were any).
            if orig_tmp_target is not None:
                tmp_ledger._target_device = orig_tmp_target
            else:
                # Remove the attribute if the ledger did not have it previously.
                try:
                    delattr(tmp_ledger, "_target_device")
                except Exception:
                    pass
            if orig_tmp_device is not None:
                tmp_ledger.device = orig_tmp_device
            else:
                try:
                    delattr(tmp_ledger, "device")
                except Exception:
                    pass

        # Get the existing transformer instance (the one currently used by the pipeline).
        global _transformer
        try:
            existing_transformer = _transformer
        except NameError:
            # If not cached, ask the ledger for it (this will be the GPU-resident model already loaded).
            existing_transformer = ledger.transformer()
            _transformer = existing_transformer

        # Map existing parameters & buffers for quick lookup
        existing_params = {name: param for name, param in existing_transformer.named_parameters()}
        existing_buffers = {name: buf for name, buf in existing_transformer.named_buffers()}

        # State dict of the CPU model (fused with LoRAs)
        new_state = new_transformer_cpu.state_dict()
        # Diagnostics: how many keys will be copied
        total_keys = len(new_state)
        matched = sum(1 for k in new_state if k in existing_params or k in existing_buffers)
        print(f"[LoRA] Transformer state keys: total={total_keys} matched_for_copy={matched}")
        if matched == 0:
            # Helpful hint if naming differs
            sample_keys = list(new_state.keys())[:10]
            print(f"[LoRA] Warning: 0 matching keys found. sample new_state keys: {sample_keys}")

        # Copy CPU tensors into the GPU-resident transformer's params/buffers in-place
        with torch.no_grad():
            for k, v in new_state.items():
                if k in existing_params:
                    tgt = existing_params[k].data
                    try:
                        tgt.copy_(v.to(tgt.device))
                    except Exception as e:
                        print(f"[LoRA] Failed to copy parameter {k}: {type(e).__name__}: {e}")
                elif k in existing_buffers:
                    tgt = existing_buffers[k].data
                    try:
                        tgt.copy_(v.to(tgt.device))
                    except Exception as e:
                        print(f"[LoRA] Failed to copy buffer {k}: {type(e).__name__}: {e}")
                else:
                    # Parameter name mismatch: skip. This can happen if the LoRA
                    # changes expected keys; it is not fatal.
                    pass

        # Free the CPU-built transformer and temporary ledger, then clear caches
        try:
            del new_transformer_cpu
            del tmp_ledger
        except Exception:
            pass
        gc.collect()
        torch.cuda.empty_cache()

        print("[LoRA] In-place parameter copy complete. LoRAs applied to the existing transformer.")
        return

    except Exception as e:
        import traceback
        print(f"[LoRA] Error during in-place LoRA application: {type(e).__name__}: {e}")
        print(traceback.format_exc())

    # Only reached on the exception path above (the success path returns early).
    print("[LoRA] apply_loras_to_pipeline finished (LOADING FAILED — no changes applied).")

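# Usage sketch (hypothetical strengths): apply_loras_to_pipeline(0.8, 0.0, 0.5)
# would fuse pose_enhancer at 0.8 and motion_helper at 0.5 into the resident
# transformer, while general_enhancer (strength 0.0) is skipped entirely.
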
# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger

# Save the original factory methods so we can rebuild individual components later.
# These are bound callables on the ledger that invoke the builder when called.
_orig_transformer_factory = ledger.transformer
_orig_video_encoder_factory = ledger.video_encoder
_orig_video_decoder_factory = ledger.video_decoder
_orig_audio_encoder_factory = ledger.audio_encoder
_orig_audio_decoder_factory = ledger.audio_decoder
_orig_vocoder_factory = ledger.vocoder
_orig_spatial_upsampler_factory = ledger.spatial_upsampler
_orig_text_encoder_factory = ledger.text_encoder
_orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor

# Call the original factories once to create the cached instances we will serve by default.
_transformer = _orig_transformer_factory()
_video_encoder = _orig_video_encoder_factory()
_video_decoder = _orig_video_decoder_factory()
_audio_encoder = _orig_audio_encoder_factory()
_audio_decoder = _orig_audio_decoder_factory()
_vocoder = _orig_vocoder_factory()
_spatial_upsampler = _orig_spatial_upsampler_factory()
_text_encoder = _orig_text_encoder_factory()
_embeddings_processor = _orig_gemma_embeddings_factory()

# Replace ledger methods with lightweight lambdas that return the cached instances.
# We keep the original factories above so we can call them later to rebuild components.
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor
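# After this swap, e.g. ledger.transformer() is just a cached lookup: it always
# returns the preloaded _transformer instance instead of invoking the builder.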

print("All models preloaded (including Gemma text encoder and audio encoder)!")

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
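# For example, a 1920x1080 upload has ratio ~1.78 (closest to 16/9 -> "16:9"),
# while a 1080x1350 portrait has ratio 0.8 (closest to 1.0 -> "1:1").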


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def on_highres_toggle(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


@spaces.GPU(duration=80)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before entering the try block so the except path below
    # can still reference current_seed if an error occurs early.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
        num_frames = int(duration * frame_rate) + 1
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
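        # Round the frame count up to 8n + 1, e.g. duration=2.5 at 24 fps gives
        # 61 raw frames, rounded to 8*8 + 1 = 65 (presumably matching the video
        # VAE's temporal stride of 8).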

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
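        # e.g. with both images set and num_frames=73, frame 0 is pinned to the
        # first image and frame 72 to the last, both at strength 1.0.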

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        log_memory("before pipeline call")

        apply_loras_to_pipeline(pose_strength, general_strength, motion_strength)

        video, audio = pipeline(
            prompt=prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        output_path = tempfile.mktemp(suffix=".mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed


css = """
.fillable{max-width: 1200px !important}
"""

# theme and css are gr.Blocks() arguments; launch() does not accept them.
with gr.Blocks(title="LTX-2.3 Heretic Distilled", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 F2LF (Heretic): Fast Audio-Video Generation with Frame Conditioning")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
            prompt = gr.Textbox(
                label="Prompt",
                info="for best results - make it as elaborate as possible",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )
            duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=10.0, value=3.0, step=0.1)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                with gr.Row():
                    width = gr.Number(label="Width", value=1536, precision=0)
                    height = gr.Number(label="Height", value=1024, precision=0)
                with gr.Row():
                    enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
                    high_res = gr.Checkbox(label="High Resolution", value=True)
                with gr.Column():
                    gr.Markdown("### LoRA adapter strengths (set to 0 to disable)")
                    pose_strength = gr.Slider(
                        label="Pose Enhancer strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    general_strength = gr.Slider(
                        label="General Enhancer strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    motion_strength = gr.Slider(
                        label="Motion Helper strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )

        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)

    gr.Examples(
        examples=[
            [
                None,
                "pinkknit.jpg",
                None,
                "The camera falls downward through darkness as if dropped into a tunnel. "
                "As it slows, five friends wearing pink knitted hats and sunglasses lean "
                "over and look down toward the camera with curious expressions. The lens "
                "has a strong fisheye effect, creating a circular frame around them. They "
                "crowd together closely, forming a symmetrical cluster while staring "
                "directly into the lens.",
                3.0,
                False,
                42,
                True,
                1024,
                1024,
                0.0,  # pose_strength (example)
                0.0,  # general_strength (example)
                0.0,  # motion_strength (example)
            ],
        ],
        inputs=[
            first_image, last_image, input_audio, prompt, duration,
            enhance_prompt, seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength,
        ],
    )

    first_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    last_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    high_res.change(
        fn=on_highres_toggle,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, duration, enhance_prompt,
            seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.launch()