dagloop5 committed on
Commit
79305ff
·
verified ·
1 Parent(s): 03b5116

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -507,26 +507,27 @@ def generate_video(
507
  except Exception as e:
508
  print(f"[LoRA] pipeline rebuild FAILED: {e}")
509
 
510
- # Force transformer rebuild so LoRA strength actually applies
511
  try:
512
  if hasattr(pipeline, "model_ledger"):
513
- # Hard reset ALL known transformer references
514
  if hasattr(pipeline.model_ledger, "_transformer"):
 
515
  pipeline.model_ledger._transformer = None
516
 
517
- # VERY IMPORTANT: also clear pipeline components cache if present
518
- if hasattr(pipeline, "pipeline_components"):
519
- try:
520
- pipeline.pipeline_components = None
521
- except Exception:
522
- pass
523
 
524
- # Force rebuild NOW (so it uses the new LoRA strength)
525
- _ = pipeline.model_ledger.transformer()
 
526
 
527
- print("[LoRA] transformer force-rebuilt with new strength")
528
  except Exception as e:
529
- print(f"[LoRA] transformer rebuild failed: {e}")
530
 
531
  log_memory("before pipeline call")
532
 
 
507
  except Exception as e:
508
  print(f"[LoRA] pipeline rebuild FAILED: {e}")
509
 
510
+ # Reset transformer so next call rebuilds it with new LoRA strength (NO preloading!)
511
  try:
512
  if hasattr(pipeline, "model_ledger"):
 
513
  if hasattr(pipeline.model_ledger, "_transformer"):
514
+ del pipeline.model_ledger._transformer
515
  pipeline.model_ledger._transformer = None
516
 
517
+ if hasattr(pipeline, "pipeline_components"):
518
+ try:
519
+ del pipeline.pipeline_components
520
+ pipeline.pipeline_components = None
521
+ except Exception:
522
+ pass
523
 
524
+ # CRITICAL: force cleanup BEFORE rebuild happens
525
+ cleanup_memory()
526
+ torch.cuda.empty_cache()
527
 
528
+ print("[LoRA] transformer reset; will rebuild during inference")
529
  except Exception as e:
530
+ print(f"[LoRA] transformer reset failed: {e}")
531
 
532
  log_memory("before pipeline call")
533