Update app.py
app.py CHANGED
@@ -80,6 +80,13 @@ from ltx_pipelines.utils.samplers import res2s_audio_video_denoising_loop
 from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
 from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
 
+from collections.abc import Iterator
+
+from ltx_core.components.schedulers import LTX2Scheduler
+from ltx_core.loader.registry import Registry
+from ltx_core.quantization import QuantizationPolicy
+from ltx_pipelines.utils.types import ModalitySpec
+
 # Force-patch xformers attention into the LTX attention module.
 from ltx_core.model.transformer import attention as _attn_mod
 print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
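
Note: the hunk cuts off at the "Before patch" log line, so the statement that actually performs the patch is not visible in this diff. A minimal sketch of what a module-level force-patch like this typically looks like, assuming xformers is installed and that the patch simply rebinds the module attribute to xformers.ops.memory_efficient_attention (both assumptions; only the comment and the print above are confirmed by the diff):

    # Sketch only: the real patch body lies below this hunk and is not shown here.
    import xformers.ops  # assumed dependency; the diff only names the patch target

    from ltx_core.model.transformer import attention as _attn_mod

    print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")

    # Rebind the module-level symbol. This affects call sites that look the name
    # up on the module at call time (attention.memory_efficient_attention); code
    # that imported the function directly into its own namespace before this
    # point keeps the old reference.
    _attn_mod.memory_efficient_attention = xformers.ops.memory_efficient_attention

    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")

Logging the attribute before and after, as the diff does, is a cheap way to verify in the Space's startup logs that the rebind actually took effect.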