Update app.py
Browse files
app.py
CHANGED
|
@@ -290,10 +290,12 @@ print("=" * 80)
|
|
| 290 |
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
|
| 291 |
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="ltx23__demopose_d3m0p0s3.safetensors")
|
| 292 |
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
|
|
|
|
| 293 |
|
| 294 |
print(f"Pose LoRA: {pose_lora_path}")
|
| 295 |
print(f"General LoRA: {general_lora_path}")
|
| 296 |
print(f"Motion LoRA: {motion_lora_path}")
|
|
|
|
| 297 |
# ----------------------------------------------------------------
|
| 298 |
|
| 299 |
print(f"Checkpoint: {checkpoint_path}")
|
|
@@ -311,11 +313,12 @@ pipeline = LTX23DistilledA2VPipeline(
|
|
| 311 |
)
|
| 312 |
# ----------------------------------------------------------------
|
| 313 |
|
| 314 |
-
def _make_lora_key(pose_strength: float, general_strength: float, motion_strength: float) -> tuple[str, str]:
|
| 315 |
rp = round(float(pose_strength), 2)
|
| 316 |
rg = round(float(general_strength), 2)
|
| 317 |
rm = round(float(motion_strength), 2)
|
| 318 |
-
|
|
|
|
| 319 |
key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
|
| 320 |
return key, key_str
|
| 321 |
|
|
@@ -324,6 +327,7 @@ def prepare_lora_cache(
|
|
| 324 |
pose_strength: float,
|
| 325 |
general_strength: float,
|
| 326 |
motion_strength: float,
|
|
|
|
| 327 |
progress=gr.Progress(track_tqdm=True),
|
| 328 |
):
|
| 329 |
"""
|
|
@@ -336,7 +340,7 @@ def prepare_lora_cache(
|
|
| 336 |
global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
|
| 337 |
|
| 338 |
ledger = pipeline.model_ledger
|
| 339 |
-
key, _ = _make_lora_key(pose_strength, general_strength, motion_strength)
|
| 340 |
cache_path = LORA_CACHE_DIR / f"{key}.pt"
|
| 341 |
|
| 342 |
progress(0.05, desc="Preparing LoRA state")
|
|
@@ -355,6 +359,7 @@ def prepare_lora_cache(
|
|
| 355 |
(pose_lora_path, round(float(pose_strength), 2)),
|
| 356 |
(general_lora_path, round(float(general_strength), 2)),
|
| 357 |
(motion_lora_path, round(float(motion_strength), 2)),
|
|
|
|
| 358 |
]
|
| 359 |
loras_for_builder = [
|
| 360 |
LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
|
|
@@ -557,6 +562,7 @@ def get_gpu_duration(
|
|
| 557 |
pose_strength: float = 0.0,
|
| 558 |
general_strength: float = 0.0,
|
| 559 |
motion_strength: float = 0.0,
|
|
|
|
| 560 |
progress=None,
|
| 561 |
):
|
| 562 |
return int(gpu_duration)
|
|
@@ -578,6 +584,7 @@ def generate_video(
|
|
| 578 |
pose_strength: float = 0.0,
|
| 579 |
general_strength: float = 0.0,
|
| 580 |
motion_strength: float = 0.0,
|
|
|
|
| 581 |
progress=gr.Progress(track_tqdm=True),
|
| 582 |
):
|
| 583 |
try:
|
|
@@ -687,15 +694,19 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
|
|
| 687 |
with gr.Column():
|
| 688 |
gr.Markdown("### LoRA adapter strengths (set to 0 to disable)")
|
| 689 |
pose_strength = gr.Slider(
|
| 690 |
-
label="
|
| 691 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 692 |
)
|
| 693 |
general_strength = gr.Slider(
|
| 694 |
-
label="
|
| 695 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 696 |
)
|
| 697 |
motion_strength = gr.Slider(
|
| 698 |
-
label="Motion Helper strength",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 699 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 700 |
)
|
| 701 |
prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
|
|
@@ -737,12 +748,13 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
|
|
| 737 |
0.0, # pose_strength (example)
|
| 738 |
0.0, # general_strength (example)
|
| 739 |
0.0, # motion_strength (example)
|
|
|
|
| 740 |
],
|
| 741 |
],
|
| 742 |
inputs=[
|
| 743 |
first_image, last_image, input_audio, prompt, duration, gpu_duration,
|
| 744 |
enhance_prompt, seed, randomize_seed, height, width,
|
| 745 |
-
pose_strength, general_strength, motion_strength,
|
| 746 |
],
|
| 747 |
)
|
| 748 |
|
|
@@ -766,7 +778,7 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
|
|
| 766 |
|
| 767 |
prepare_lora_btn.click(
|
| 768 |
fn=prepare_lora_cache,
|
| 769 |
-
inputs=[pose_strength, general_strength, motion_strength],
|
| 770 |
outputs=[lora_status],
|
| 771 |
)
|
| 772 |
|
|
@@ -775,7 +787,7 @@ with gr.Blocks(title="LTX-2.3 Heretic Distilled") as demo:
|
|
| 775 |
inputs=[
|
| 776 |
first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
|
| 777 |
seed, randomize_seed, height, width,
|
| 778 |
-
pose_strength, general_strength, motion_strength,
|
| 779 |
],
|
| 780 |
outputs=[output_video, seed],
|
| 781 |
)
|
|
|
|
| 290 |
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
|
| 291 |
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="ltx23__demopose_d3m0p0s3.safetensors")
|
| 292 |
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
|
| 293 |
+
reasoning_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_Reasoning_V1.safetensors")
|
| 294 |
|
| 295 |
print(f"Pose LoRA: {pose_lora_path}")
|
| 296 |
print(f"General LoRA: {general_lora_path}")
|
| 297 |
print(f"Motion LoRA: {motion_lora_path}")
|
| 298 |
+
print(f"Reasoning LoRA: {reasoning_lora_path}")
|
| 299 |
# ----------------------------------------------------------------
|
| 300 |
|
| 301 |
print(f"Checkpoint: {checkpoint_path}")
|
|
|
|
| 313 |
)
|
| 314 |
# ----------------------------------------------------------------
|
| 315 |
|
| 316 |
+
def _make_lora_key(pose_strength: float, general_strength: float, motion_strength: float, reasoning_strength: float) -> tuple[str, str]:
    """Build a stable cache key for the current LoRA strength combination.

    Each strength is rounded to two decimal places and paired with its
    LoRA file path (module globals ``pose_lora_path``,
    ``general_lora_path``, ``motion_lora_path``, ``reasoning_lora_path``);
    the joined string is then SHA-256 hashed so the key is safe to use as
    a cache filename.

    Returns:
        A ``(digest, key_str)`` tuple: the hex digest used as the cache
        key, and the human-readable string it was derived from.
    """
    rounded = [round(float(s), 2) for s in (pose_strength, general_strength, motion_strength, reasoning_strength)]
    paths = (pose_lora_path, general_lora_path, motion_lora_path, reasoning_lora_path)
    key_str = "|".join(f"{p}:{r}" for p, r in zip(paths, rounded))
    digest = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
    return digest, key_str
|
| 324 |
|
|
|
|
| 327 |
pose_strength: float,
|
| 328 |
general_strength: float,
|
| 329 |
motion_strength: float,
|
| 330 |
+
reasoning_strength: float,
|
| 331 |
progress=gr.Progress(track_tqdm=True),
|
| 332 |
):
|
| 333 |
"""
|
|
|
|
| 340 |
global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
|
| 341 |
|
| 342 |
ledger = pipeline.model_ledger
|
| 343 |
+
key, _ = _make_lora_key(pose_strength, general_strength, motion_strength, reasoning_strength)
|
| 344 |
cache_path = LORA_CACHE_DIR / f"{key}.pt"
|
| 345 |
|
| 346 |
progress(0.05, desc="Preparing LoRA state")
|
|
|
|
| 359 |
(pose_lora_path, round(float(pose_strength), 2)),
|
| 360 |
(general_lora_path, round(float(general_strength), 2)),
|
| 361 |
(motion_lora_path, round(float(motion_strength), 2)),
|
| 362 |
+
(reasoning_lora_path, round(float(reasoning_strength), 2)),
|
| 363 |
]
|
| 364 |
loras_for_builder = [
|
| 365 |
LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
|
|
|
|
| 562 |
pose_strength: float = 0.0,
|
| 563 |
general_strength: float = 0.0,
|
| 564 |
motion_strength: float = 0.0,
|
| 565 |
+
reasoning_strength: float = 0.0,
|
| 566 |
progress=None,
|
| 567 |
):
|
| 568 |
return int(gpu_duration)
|
|
|
|
| 584 |
pose_strength: float = 0.0,
|
| 585 |
general_strength: float = 0.0,
|
| 586 |
motion_strength: float = 0.0,
|
| 587 |
+
reasoning_strength: float = 0.0,
|
| 588 |
progress=gr.Progress(track_tqdm=True),
|
| 589 |
):
|
| 590 |
try:
|
|
|
|
| 694 |
with gr.Column():
|
| 695 |
gr.Markdown("### LoRA adapter strengths (set to 0 to disable)")
|
| 696 |
pose_strength = gr.Slider(
|
| 697 |
+
label="Motion Enhancer strength",
|
| 698 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 699 |
)
|
| 700 |
general_strength = gr.Slider(
|
| 701 |
+
label="Posing Enhancer strength",
|
| 702 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 703 |
)
|
| 704 |
motion_strength = gr.Slider(
|
| 705 |
+
label="Motion Enhancer Helper strength",
|
| 706 |
+
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 707 |
+
)
|
| 708 |
+
reasoning_strength = gr.Slider(
|
| 709 |
+
label="Reasoning Helper strength",
|
| 710 |
minimum=0.0, maximum=2.0, value=0.0, step=0.01
|
| 711 |
)
|
| 712 |
prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
|
|
|
|
| 748 |
0.0, # pose_strength (example)
|
| 749 |
0.0, # general_strength (example)
|
| 750 |
0.0, # motion_strength (example)
|
| 751 |
+
0.0,  # reasoning_strength (example)
|
| 752 |
],
|
| 753 |
],
|
| 754 |
inputs=[
|
| 755 |
first_image, last_image, input_audio, prompt, duration, gpu_duration,
|
| 756 |
enhance_prompt, seed, randomize_seed, height, width,
|
| 757 |
+
pose_strength, general_strength, motion_strength, reasoning_strength,
|
| 758 |
],
|
| 759 |
)
|
| 760 |
|
|
|
|
| 778 |
|
| 779 |
prepare_lora_btn.click(
|
| 780 |
fn=prepare_lora_cache,
|
| 781 |
+
inputs=[pose_strength, general_strength, motion_strength, reasoning_strength],
|
| 782 |
outputs=[lora_status],
|
| 783 |
)
|
| 784 |
|
|
|
|
| 787 |
inputs=[
|
| 788 |
first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
|
| 789 |
seed, randomize_seed, height, width,
|
| 790 |
+
pose_strength, general_strength, motion_strength, reasoning_strength,
|
| 791 |
],
|
| 792 |
outputs=[output_video, seed],
|
| 793 |
)
|