dagloop5 committed on
Commit
0b6d12e
·
verified ·
1 Parent(s): 77c7cba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -2
app.py CHANGED
@@ -267,6 +267,7 @@ class LTX23DistilledA2VPipeline(DistilledPipeline):
267
  # Model repos
268
  LTX_MODEL_REPO = "Lightricks/LTX-2.3"
269
  GEMMA_REPO ="Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
 
270
  EROS_REPO = "dagloop5/LoRA"
271
  EROS_FILE = "ltx2310eros_beta.safetensors"
272
 
@@ -328,6 +329,7 @@ checkpoint_path = EROS_FIXED
328
 
329
  spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
330
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
 
331
 
332
  # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
333
  # LoRA repo + download the requested LoRA adapters
@@ -360,6 +362,7 @@ print(f"Demopose LoRA: {demopose_lora_path}")
360
  print(f"Checkpoint: {checkpoint_path}")
361
  print(f"Spatial upsampler: {spatial_upsampler_path}")
362
  print(f"Gemma root: {gemma_root}")
 
363
 
364
  # Initialize pipeline WITH text encoder and optional audio support
365
  # ---- Replace block (pipeline init) lines 275-281 ----
@@ -367,7 +370,13 @@ pipeline = LTX23DistilledA2VPipeline(
367
  distilled_checkpoint_path=checkpoint_path,
368
  spatial_upsampler_path=spatial_upsampler_path,
369
  gemma_root=gemma_root,
370
- loras=[],
 
 
 
 
 
 
371
  quantization=QuantizationPolicy.fp8_cast(), # keep FP8 quantization unchanged
372
  )
373
  # ----------------------------------------------------------------
@@ -382,7 +391,12 @@ def _make_lora_key(pose_strength: float, general_strength: float, motion_strengt
382
  rf = round(float(fluid_strength), 2)
383
  rl = round(float(liquid_strength), 2)
384
  ro = round(float(demopose_strength), 2)
385
- key_str = f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}"
 
 
 
 
 
386
  key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
387
  return key, key_str
388
 
@@ -425,6 +439,7 @@ def prepare_lora_cache(
425
  print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
426
 
427
  entries = [
 
428
  (pose_lora_path, round(float(pose_strength), 2)),
429
  (general_lora_path, round(float(general_strength), 2)),
430
  (motion_lora_path, round(float(motion_strength), 2)),
 
267
  # Model repos
268
  LTX_MODEL_REPO = "Lightricks/LTX-2.3"
269
  GEMMA_REPO ="Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
270
+ DISTILLED_LORA_FILE = "ltx-2.3-22b-distilled-lora-384.safetensors"
271
  EROS_REPO = "dagloop5/LoRA"
272
  EROS_FILE = "ltx2310eros_beta.safetensors"
273
 
 
329
 
330
  spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
331
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
332
+ distilled_lora_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename=DISTILLED_LORA_FILE)
333
 
334
  # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
335
  # LoRA repo + download the requested LoRA adapters
 
362
  print(f"Checkpoint: {checkpoint_path}")
363
  print(f"Spatial upsampler: {spatial_upsampler_path}")
364
  print(f"Gemma root: {gemma_root}")
365
+ print(f"Distilled LoRA: {distilled_lora_path}")
366
 
367
  # Initialize pipeline WITH text encoder and optional audio support
368
  # ---- Replace block (pipeline init) lines 275-281 ----
 
370
  distilled_checkpoint_path=checkpoint_path,
371
  spatial_upsampler_path=spatial_upsampler_path,
372
  gemma_root=gemma_root,
373
+ loras=[
374
+ LoraPathStrengthAndSDOps(
375
+ distilled_lora_path,
376
+ 1.0,
377
+ LTXV_LORA_COMFY_RENAMING_MAP,
378
+ )
379
+ ],
380
  quantization=QuantizationPolicy.fp8_cast(), # keep FP8 quantization unchanged
381
  )
382
  # ----------------------------------------------------------------
 
391
  rf = round(float(fluid_strength), 2)
392
  rl = round(float(liquid_strength), 2)
393
  ro = round(float(demopose_strength), 2)
394
+ key_str = (
395
+ f"{distilled_lora_path}:1.0|"
396
+ f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|"
397
+ f"{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|"
398
+ f"{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}"
399
+ )
400
  key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
401
  return key, key_str
402
 
 
439
  print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
440
 
441
  entries = [
442
+ (distilled_lora_path, 1.0),
443
  (pose_lora_path, round(float(pose_strength), 2)),
444
  (general_lora_path, round(float(general_strength), 2)),
445
  (motion_lora_path, round(float(motion_strength), 2)),