dagloop5 committed on
Commit
0444684
·
verified ·
1 Parent(s): 0dd62b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -11
app.py CHANGED
@@ -266,17 +266,6 @@ print("=" * 80)
266
  print("Downloading LTX-2.3 distilled model + Gemma...")
267
  print("=" * 80)
268
 
269
- # ----------------------------
270
- # Pipeline cache for LoRA strengths (keeps at most 2 pipelines to limit VRAM)
271
- # ----------------------------
272
- # Use rounded strengths as keys (2 decimal places)
273
- pipeline_cache: OrderedDict[float, LTX23DistilledA2VPipeline] = OrderedDict()
274
- # Record the current pipeline's LoRA strength (we built the module above with lora_descriptor default 1.0)
275
- current_lora_strength: float = round(1.0, 2)
276
- pipeline_cache[current_lora_strength] = pipeline
277
- CACHE_MAX_SIZE = 2 # keep at most two pipeline instances in memory
278
- print(f"[CACHE] initialized pipeline cache with strength={current_lora_strength}")
279
-
280
  checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
281
  spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
282
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
@@ -338,6 +327,14 @@ print("=" * 80)
338
  print("Pipeline ready!")
339
  print("=" * 80)
340
 
 
 
 
 
 
 
 
 
341
 
342
  def log_memory(tag: str):
343
  if torch.cuda.is_available():
 
266
  print("Downloading LTX-2.3 distilled model + Gemma...")
267
  print("=" * 80)
268
 
 
 
 
 
 
 
 
 
 
 
 
269
  checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
270
  spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
271
  gemma_root = snapshot_download(repo_id=GEMMA_REPO)
 
327
  print("Pipeline ready!")
328
  print("=" * 80)
329
 
330
+ # ----------------------------
331
+ # Pipeline cache for LoRA strengths (keeps at most 2 pipelines to limit VRAM)
332
+ # ----------------------------
333
+ pipeline_cache: OrderedDict[float, LTX23DistilledA2VPipeline] = OrderedDict()
334
+ current_lora_strength: float = round(1.0, 2)
335
+ pipeline_cache[current_lora_strength] = pipeline
336
+ CACHE_MAX_SIZE = 2
337
+ print(f"[CACHE] initialized pipeline cache with strength={current_lora_strength}")
338
 
339
  def log_memory(tag: str):
340
  if torch.cuda.is_available():