RioShiina committed
Commit b7d4bc8 (verified) · 1 Parent(s): 5f845cc

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.
Files changed (50):
  1. README.md +1 -1
  2. app.py +14 -137
  3. chain_injectors/controlnet_injector.py +60 -0
  4. chain_injectors/diffsynth_controlnet_injector.py +75 -0
  5. chain_injectors/flux1_ipadapter_injector.py +46 -0
  6. chain_injectors/ipadapter_injector.py +106 -0
  7. chain_injectors/newbie_lora_injector.py +63 -0
  8. chain_injectors/reference_latent_injector.py +110 -19
  9. chain_injectors/sd3_ipadapter_injector.py +66 -0
  10. chain_injectors/style_injector.py +71 -0
  11. chain_injectors/vae_injector.py +30 -0
  12. comfy_integration/nodes.py +5 -0
  13. comfy_integration/setup.py +36 -13
  14. core/generation_logic.py +0 -15
  15. core/model_manager.py +6 -19
  16. core/pipelines/controlnet_preprocessor.py +0 -143
  17. core/pipelines/sd_image_pipeline.py +224 -59
  18. core/pipelines/workflow_recipes/_partials/{_base_sampler.yaml → _base_sampler_sd.yaml} +15 -2
  19. core/pipelines/workflow_recipes/_partials/conditioning/anima.yaml +54 -0
  20. core/pipelines/workflow_recipes/_partials/conditioning/chroma1-radiance.yaml +59 -0
  21. core/pipelines/workflow_recipes/_partials/conditioning/chroma1.yaml +61 -0
  22. core/pipelines/workflow_recipes/_partials/conditioning/ernie-image.yaml +54 -0
  23. core/pipelines/workflow_recipes/_partials/conditioning/flux1.yaml +64 -0
  24. core/pipelines/workflow_recipes/_partials/conditioning/flux2-kv.yaml +104 -0
  25. core/pipelines/workflow_recipes/_partials/conditioning/flux2.yaml +33 -6
  26. core/pipelines/workflow_recipes/_partials/conditioning/hidream.yaml +53 -0
  27. core/pipelines/workflow_recipes/_partials/conditioning/hunyuanimage.yaml +42 -0
  28. core/pipelines/workflow_recipes/_partials/conditioning/longcat-image.yaml +83 -0
  29. core/pipelines/workflow_recipes/_partials/conditioning/lumina.yaml +57 -0
  30. core/pipelines/workflow_recipes/_partials/conditioning/newbie-image.yaml +65 -0
  31. core/pipelines/workflow_recipes/_partials/conditioning/omnigen2.yaml +59 -0
  32. core/pipelines/workflow_recipes/_partials/conditioning/ovis-image.yaml +50 -0
  33. core/pipelines/workflow_recipes/_partials/conditioning/qwen-image.yaml +80 -0
  34. core/pipelines/workflow_recipes/_partials/conditioning/sd15.yaml +69 -0
  35. core/pipelines/workflow_recipes/_partials/conditioning/sd35.yaml +58 -0
  36. core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml +63 -0
  37. core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml +65 -0
  38. core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml +4 -3
  39. core/pipelines/workflow_recipes/_partials/input/img2img.yaml +3 -2
  40. core/pipelines/workflow_recipes/_partials/input/inpaint.yaml +6 -8
  41. core/pipelines/workflow_recipes/_partials/input/outpaint.yaml +14 -11
  42. core/pipelines/workflow_recipes/_partials/input/txt2img.yaml +2 -8
  43. core/pipelines/workflow_recipes/_partials/input/txt2img_chroma_radiance_latent.yaml +11 -0
  44. core/pipelines/workflow_recipes/_partials/input/txt2img_flux2_latent.yaml +11 -0
  45. core/pipelines/workflow_recipes/_partials/input/txt2img_hunyuan_latent.yaml +11 -0
  46. core/pipelines/workflow_recipes/_partials/input/txt2img_latent.yaml +11 -0
  47. core/pipelines/workflow_recipes/_partials/input/txt2img_sd3_latent.yaml +11 -0
  48. core/pipelines/workflow_recipes/sd_unified_recipe.yaml +2 -2
  49. core/settings.py +111 -31
  50. requirements.txt +7 -6
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ImageGen - FLUX.2
+title: ImageGen - FLUX.2-KV
 emoji: 🖼
 colorFrom: purple
 colorTo: red
app.py CHANGED
@@ -1,7 +1,6 @@
 import spaces
 import os
 import sys
-import requests
 import site
 
 APP_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -45,106 +44,14 @@ def dummy_gpu_for_startup():
     print("--- [GPU Startup] Startup check passed. ---")
     return "Startup check passed."
 
-def handle_private_downloads():
-    """
-    Checks for a private_file_list.yaml, downloads required models using HF_TOKEN,
-    and then clears the token from the environment.
-    """
-    import yaml
-    from huggingface_hub import hf_hub_download
-    from core.settings import (
-        DIFFUSION_MODELS_DIR, TEXT_ENCODERS_DIR, VAE_DIR, CHECKPOINT_DIR,
-        LORA_DIR, CONTROLNET_DIR, MODEL_PATCHES_DIR, EMBEDDING_DIR
-    )
-
-    print("--- [Startup] Checking for private models to download... ---")
-    private_list_path = os.path.join(APP_DIR, 'yaml', 'private_file_list.yaml')
-
-    if not os.path.exists(private_list_path):
-        print("--- [Startup] No private model list found. Skipping. ---")
-        if 'HF_TOKEN' in os.environ:
-            del os.environ['HF_TOKEN']
-            print("--- [Startup] Cleared HF_TOKEN environment variable as it is no longer needed. ---")
-            print(f"--- [Startup] Verifying HF_TOKEN after clearing: {os.environ.get('HF_TOKEN')}")
-        return
-
-    try:
-        with open(private_list_path, 'r', encoding='utf-8') as f:
-            private_files_config = yaml.safe_load(f)
-
-        if not private_files_config or 'file' not in private_files_config:
-            print("--- [Startup] Private model list is empty or malformed. Skipping. ---")
-            return
-
-        category_to_dir_map = {
-            "diffusion_models": DIFFUSION_MODELS_DIR,
-            "text_encoders": TEXT_ENCODERS_DIR,
-            "vae": VAE_DIR,
-            "checkpoints": CHECKPOINT_DIR,
-            "loras": LORA_DIR,
-            "controlnet": CONTROLNET_DIR,
-            "model_patches": MODEL_PATCHES_DIR,
-            "embeddings": EMBEDDING_DIR,
-        }
-
-        files_to_download = []
-        for category, files in private_files_config.get('file', {}).items():
-            dest_dir = category_to_dir_map.get(category)
-            if not dest_dir:
-                print(f"--- [Startup] ⚠️ Unknown category '{category}' in private_file_list.yaml. Skipping. ---")
-                continue
-
-            if isinstance(files, list):
-                for file_info in files:
-                    files_to_download.append((file_info, dest_dir))
-
-        if not files_to_download:
-            print("--- [Startup] No private models configured for download. ---")
-            return
-
-        print(f"--- [Startup] Found {len(files_to_download)} private model(s) to download. Using HF_TOKEN if available. ---")
-
-        for file_info, dest_dir in files_to_download:
-            filename = file_info.get("filename")
-            repo_id = file_info.get("repo_id")
-            repo_path = file_info.get("repository_file_path", filename)
-
-            if not all([filename, repo_id]):
-                print(f"--- [Startup] ⚠️ Skipping malformed entry in private_file_list.yaml: {file_info} ---")
-                continue
-
-            dest_path = os.path.join(dest_dir, filename)
-            if os.path.lexists(dest_path):
-                print(f"--- [Startup] ✅ Model '{filename}' already exists. Skipping download. ---")
-                continue
-
-            print(f"--- [Startup] ⏳ Downloading '{filename}' from repo '{repo_id}'... ---")
-            try:
-                cached_path = hf_hub_download(repo_id=repo_id, filename=repo_path)
-                os.makedirs(dest_dir, exist_ok=True)
-                os.symlink(cached_path, dest_path)
-                print(f"--- [Startup] ✅ Successfully downloaded and linked '{filename}'. ---")
-            except Exception as e:
-                print(f"--- [Startup] ❌ ERROR: Failed to download '{filename}': {e}")
-                print("--- [Startup] ❌ Please ensure your HF_TOKEN is set correctly and has access to the repository. ---")
-
-    finally:
-        if 'HF_TOKEN' in os.environ:
-            del os.environ['HF_TOKEN']
-            print("--- [Startup] ✅ Cleared HF_TOKEN environment variable. ---")
-            print(f"--- [Startup] Verifying HF_TOKEN after clearing: {os.environ.get('HF_TOKEN')}")
-        else:
-            print("--- [Startup] Note: HF_TOKEN environment variable was not set. Private downloads may fail without it. ---")
 
 def main():
     from utils.app_utils import print_welcome_message
     from scripts import build_sage_attention
+    from comfy_integration import setup as setup_comfyui
 
     print_welcome_message()
 
-    # Handle downloads that require authentication first.
-    handle_private_downloads()
-
     print("--- [Setup] Attempting to build and install SageAttention... ---")
     try:
         build_sage_attention.install_sage_attention()
@@ -152,7 +59,9 @@ def main():
     except Exception as e:
         print(f"--- [Setup] ❌ SageAttention installation failed: {e}. Continuing with default attention. ---")
 
-
+    print("--- [Setup] Starting ComfyUI initialization ---")
+    setup_comfyui.initialize_comfyui()
+
     print("--- [Setup] Reloading site-packages to detect newly installed packages... ---")
     try:
         site.main()
@@ -160,52 +69,20 @@ def main():
     except Exception as e:
         print(f"--- [Setup] ⚠️ Warning: Could not fully reload site-packages: {e} ---")
 
-    from comfy_integration import setup as setup_comfyui
-    from utils.app_utils import (
-        build_preprocessor_model_map,
-        build_preprocessor_parameter_map
-    )
-    from core import shared_state
-    from core.settings import ALL_MODEL_MAP, ALL_FILE_DOWNLOAD_MAP
-
-    def check_all_model_urls_on_startup():
-        print("--- [Setup] Checking all model URL validity (one-time check) ---")
-        for display_name, model_info in ALL_MODEL_MAP.items():
-            _, components, _, _ = model_info
-            if not components: continue
-
-            for filename in components.values():
-                download_info = ALL_FILE_DOWNLOAD_MAP.get(filename, {})
-                repo_id = download_info.get('repo_id')
-                if not repo_id: continue
-
-                repo_file_path = download_info.get('repository_file_path', filename)
-                url = f"https://huggingface.co/{repo_id}/resolve/main/{repo_file_path}"
-
-                try:
-                    response = requests.head(url, timeout=5, allow_redirects=True)
-                    if response.status_code >= 400:
-                        print(f"❌ Invalid URL for '{display_name}' component '{filename}': {url} (Status: {response.status_code})")
-                        shared_state.INVALID_MODEL_URLS[display_name] = True
-                        break
-                except requests.RequestException as e:
-                    print(f"❌ URL check failed for '{display_name}' component '{filename}': {e}")
-                    shared_state.INVALID_MODEL_URLS[display_name] = True
-                    break
-        print("--- [Setup] ✅ Finished checking model URLs. ---")
+    print("--- Initiating GPU Startup Check & SageAttention Patch ---")
+    try:
+        dummy_gpu_for_startup()
+    except Exception as e:
+        print(f"--- [GPU Startup] ⚠️ Warning: Startup check failed: {e} ---")
+
+    from utils.app_utils import load_ipadapter_presets
 
     print("--- Starting Application Setup ---")
 
-    setup_comfyui.initialize_comfyui()
-
-    check_all_model_urls_on_startup()
-
-    print("--- Building ControlNet preprocessor maps ---")
-    from core.generation_logic import build_reverse_map
-    build_reverse_map()
-    build_preprocessor_model_map()
-    build_preprocessor_parameter_map()
-    print("--- ✅ ControlNet preprocessor setup complete. ---")
+    print("--- Loading IPAdapter presets ---")
+    load_ipadapter_presets()
+    print("--- ✅ IPAdapter setup complete. ---")
 
     print("--- Environment configured. Proceeding with module imports. ---")
     from ui.layout import build_ui
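
Aside: the deleted handle_private_downloads expected yaml/private_file_list.yaml to parse into a structure like the following, shown here as the equivalent Python dict from yaml.safe_load. The key names come from the removed code; the filenames and repo IDs are illustrative only.

# Equivalent of yaml.safe_load('yaml/private_file_list.yaml') for the
# removed handle_private_downloads(); concrete values are illustrative.
private_files_config = {
    "file": {
        "diffusion_models": [          # category -> destination directory
            {
                "filename": "example_model.safetensors",
                "repo_id": "some-org/some-private-repo",
                # optional; defaults to 'filename' when omitted
                "repository_file_path": "weights/example_model.safetensors",
            },
        ],
        # other recognized categories: text_encoders, vae, checkpoints,
        # loras, controlnet, model_patches, embeddings
    },
}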
chain_injectors/controlnet_injector.py ADDED
@@ -0,0 +1,60 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: Target node '{ksampler_name}' for ControlNet chain not found. Skipping chain injection.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+
+    if 'positive' not in assembler.workflow[ksampler_id]['inputs'] or \
+       'negative' not in assembler.workflow[ksampler_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' is missing 'positive' or 'negative' inputs. Skipping ControlNet chain.")
+        return
+
+    vae_source_str = chain_definition.get('vae_source')
+    if not vae_source_str:
+        print("Warning: 'vae_source' definition missing in the recipe for the ControlNet chain. Skipping.")
+        return
+    vae_node_name, vae_idx_str = vae_source_str.split(':')
+    if vae_node_name not in assembler.node_map:
+        print(f"Warning: VAE source node '{vae_node_name}' for ControlNet chain not found. Skipping.")
+        return
+    vae_connection = [assembler.node_map[vae_node_name], int(vae_idx_str)]
+
+    current_positive_connection = assembler.workflow[ksampler_id]['inputs']['positive']
+    current_negative_connection = assembler.workflow[ksampler_id]['inputs']['negative']
+
+    for item_data in chain_items:
+        cn_loader_id = assembler._get_unique_id()
+        cn_loader_node = assembler._get_node_template("ControlNetLoader")
+        cn_loader_node['inputs']['control_net_name'] = item_data['control_net_name']
+        assembler.workflow[cn_loader_id] = cn_loader_node
+
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        apply_cn_id = assembler._get_unique_id()
+        apply_cn_node = assembler._get_node_template(chain_definition['template'])
+
+        apply_cn_node['inputs']['strength'] = item_data['strength']
+
+        apply_cn_node['inputs']['positive'] = current_positive_connection
+        apply_cn_node['inputs']['negative'] = current_negative_connection
+        apply_cn_node['inputs']['control_net'] = [cn_loader_id, 0]
+        apply_cn_node['inputs']['image'] = [image_loader_id, 0]
+        apply_cn_node['inputs']['vae'] = vae_connection
+
+        assembler.workflow[apply_cn_id] = apply_cn_node
+
+        current_positive_connection = [apply_cn_id, 0]
+        current_negative_connection = [apply_cn_id, 1]
+
+    assembler.workflow[ksampler_id]['inputs']['positive'] = current_positive_connection
+    assembler.workflow[ksampler_id]['inputs']['negative'] = current_negative_connection
+
+    print(f"ControlNet injector applied. KSampler inputs redirected through {len(chain_items)} ControlNet nodes.")
chain_injectors/diffsynth_controlnet_injector.py ADDED
@@ -0,0 +1,75 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    model_sampler_name = chain_definition.get('model_sampler_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
+    target_node_id = None
+    target_input_name = 'model'
+
+    if model_sampler_name and model_sampler_name in assembler.node_map:
+        model_sampler_id = assembler.node_map[model_sampler_name]
+        if target_input_name in assembler.workflow[model_sampler_id]['inputs']:
+            target_node_id = model_sampler_id
+            print(f"ControlNet Model Patch injector targeting ModelSamplingAuraFlow node '{model_sampler_name}'.")
+
+    if not target_node_id:
+        if ksampler_name in assembler.node_map:
+            ksampler_id = assembler.node_map[ksampler_name]
+            if target_input_name in assembler.workflow[ksampler_id]['inputs']:
+                target_node_id = ksampler_id
+                print(f"ControlNet Model Patch injector targeting KSampler node '{ksampler_name}'.")
+        else:
+            print(f"Warning: Neither ModelSamplingAuraFlow node '{model_sampler_name}' nor KSampler node '{ksampler_name}' found for ControlNet patch chain. Skipping.")
+            return
+
+    if not target_node_id:
+        print(f"Warning: Could not find a valid 'model' input on target nodes. Skipping ControlNet patch chain.")
+        return
+
+    current_model_connection = assembler.workflow[target_node_id]['inputs'][target_input_name]
+
+    vae_source_str = chain_definition.get('vae_source')
+    vae_connection = None
+    if vae_source_str:
+        try:
+            vae_node_name, vae_idx_str = vae_source_str.split(':')
+            if vae_node_name in assembler.node_map:
+                vae_connection = [assembler.node_map[vae_node_name], int(vae_idx_str)]
+            else:
+                print(f"Warning: VAE source node '{vae_node_name}' not found for ControlNet patch chain. VAE will not be connected.")
+        except ValueError:
+            print(f"Warning: Invalid 'vae_source' format '{vae_source_str}' for ControlNet patch chain. Expected 'node_name:index'. VAE will not be connected.")
+    else:
+        print(f"Warning: 'vae_source' not defined for ControlNet patch chain definition. VAE may not be connected.")
+
+    for item_data in chain_items:
+        patch_loader_id = assembler._get_unique_id()
+        patch_loader_node = assembler._get_node_template("ModelPatchLoader")
+        patch_loader_node['inputs']['name'] = item_data['control_net_name']
+        assembler.workflow[patch_loader_id] = patch_loader_node
+
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        apply_cn_id = assembler._get_unique_id()
+        apply_cn_node = assembler._get_node_template(chain_definition['template'])
+
+        apply_cn_node['inputs']['strength'] = item_data.get('strength', 1.0)
+        apply_cn_node['inputs']['model'] = current_model_connection
+        apply_cn_node['inputs']['model_patch'] = [patch_loader_id, 0]
+        apply_cn_node['inputs']['image'] = [image_loader_id, 0]
+
+        if 'vae' in apply_cn_node['inputs'] and vae_connection:
+            apply_cn_node['inputs']['vae'] = vae_connection
+
+        assembler.workflow[apply_cn_id] = apply_cn_node
+
+        current_model_connection = [apply_cn_id, 0]
+
+    assembler.workflow[target_node_id]['inputs'][target_input_name] = current_model_connection
+
+    print(f"ControlNet Model Patch injector applied. Target 'model' input re-routed through {len(chain_items)} patch(es).")
chain_injectors/flux1_ipadapter_injector.py ADDED
@@ -0,0 +1,46 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: KSampler node '{ksampler_name}' not found for Flux1 IPAdapter chain. Skipping.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+
+    if 'model' not in assembler.workflow[ksampler_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' is missing 'model' input. Skipping Flux1 IPAdapter chain.")
+        return
+
+    current_model_connection = assembler.workflow[ksampler_id]['inputs']['model']
+
+    for item_data in chain_items:
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        ipadapter_loader_id = assembler._get_unique_id()
+        ipadapter_loader_node = assembler._get_node_template("IPAdapterFluxLoader")
+        ipadapter_loader_node['inputs']['ipadapter'] = "ip-adapter.bin"
+        ipadapter_loader_node['inputs']['clip_vision'] = "google/siglip-so400m-patch14-384"
+        ipadapter_loader_node['inputs']['provider'] = "cuda"
+        assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node
+
+        apply_ipa_id = assembler._get_unique_id()
+        apply_ipa_node = assembler._get_node_template("ApplyIPAdapterFlux")
+
+        apply_ipa_node['inputs']['weight'] = item_data['weight']
+        apply_ipa_node['inputs']['start_percent'] = item_data.get('start_percent', 0.0)
+        apply_ipa_node['inputs']['end_percent'] = item_data.get('end_percent', 0.6)
+
+        apply_ipa_node['inputs']['model'] = current_model_connection
+        apply_ipa_node['inputs']['ipadapter_flux'] = [ipadapter_loader_id, 0]
+        apply_ipa_node['inputs']['image'] = [image_loader_id, 0]
+
+        assembler.workflow[apply_ipa_id] = apply_ipa_node
+        current_model_connection = [apply_ipa_id, 0]
+
+    assembler.workflow[ksampler_id]['inputs']['model'] = current_model_connection
+    print(f"Flux1 IPAdapter injector applied. KSampler model input re-routed through {len(chain_items)} IPAdapter(s).")
chain_injectors/ipadapter_injector.py ADDED
@@ -0,0 +1,106 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    final_settings = {}
+    if chain_items and isinstance(chain_items[-1], dict) and chain_items[-1].get('is_final_settings'):
+        final_settings = chain_items.pop()
+
+    if not chain_items:
+        return
+
+    end_node_name = chain_definition.get('end')
+    if not end_node_name or end_node_name not in assembler.node_map:
+        print(f"Warning: Target node '{end_node_name}' for IPAdapter chain not found. Skipping chain injection.")
+        return
+
+    end_node_id = assembler.node_map[end_node_name]
+
+    if 'model' not in assembler.workflow[end_node_id]['inputs']:
+        print(f"Warning: Target node '{end_node_name}' is missing 'model' input. Skipping IPAdapter chain.")
+        return
+
+    current_model_connection = assembler.workflow[end_node_id]['inputs']['model']
+
+    model_type = final_settings.get('model_type', 'sdxl')
+    megapixels = 1.05 if model_type == 'sdxl' else 0.39
+
+    pos_embed_outputs = []
+    neg_embed_outputs = []
+
+    for i, item_data in enumerate(chain_items):
+        loader_type = 'FaceID' if 'FACEID' in item_data.get('preset', '') else 'Unified'
+
+        loader_template_name = "IPAdapterUnifiedLoader"
+        if loader_type == 'FaceID':
+            loader_template_name = "IPAdapterUnifiedLoaderFaceID"
+
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        image_scaler_id = assembler._get_unique_id()
+        image_scaler_node = assembler._get_node_template("ImageScaleToTotalPixels")
+        image_scaler_node['inputs']['image'] = [image_loader_id, 0]
+        image_scaler_node['inputs']['megapixels'] = megapixels
+        image_scaler_node['inputs']['upscale_method'] = "lanczos"
+        assembler.workflow[image_scaler_id] = image_scaler_node
+
+        ipadapter_loader_id = assembler._get_unique_id()
+        ipadapter_loader_node = assembler._get_node_template(loader_template_name)
+        ipadapter_loader_node['inputs']['model'] = current_model_connection
+        ipadapter_loader_node['inputs']['preset'] = item_data['preset']
+        if loader_type == 'FaceID':
+            ipadapter_loader_node['inputs']['lora_strength'] = item_data.get('lora_strength', 0.6)
+        assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node
+
+        encoder_id = assembler._get_unique_id()
+        encoder_node = assembler._get_node_template("IPAdapterEncoder")
+        encoder_node['inputs']['weight'] = item_data['weight']
+        encoder_node['inputs']['ipadapter'] = [ipadapter_loader_id, 1]
+        encoder_node['inputs']['image'] = [image_scaler_id, 0]
+        assembler.workflow[encoder_id] = encoder_node
+
+        pos_embed_outputs.append([encoder_id, 0])
+        neg_embed_outputs.append([encoder_id, 1])
+
+    pos_combiner_id = assembler._get_unique_id()
+    pos_combiner_node = assembler._get_node_template("IPAdapterCombineEmbeds")
+    pos_combiner_node['inputs']['method'] = final_settings.get('final_combine_method', 'concat')
+    for i, conn in enumerate(pos_embed_outputs):
+        pos_combiner_node['inputs'][f'embed{i+1}'] = conn
+    assembler.workflow[pos_combiner_id] = pos_combiner_node
+
+    neg_combiner_id = assembler._get_unique_id()
+    neg_combiner_node = assembler._get_node_template("IPAdapterCombineEmbeds")
+    neg_combiner_node['inputs']['method'] = final_settings.get('final_combine_method', 'concat')
+    for i, conn in enumerate(neg_embed_outputs):
+        neg_combiner_node['inputs'][f'embed{i+1}'] = conn
+    assembler.workflow[neg_combiner_id] = neg_combiner_node
+
+    final_loader_type = 'FaceID' if 'FACEID' in final_settings.get('final_preset', '') else 'Unified'
+    final_loader_template_name = "IPAdapterUnifiedLoader"
+    if final_loader_type == 'FaceID':
+        final_loader_template_name = "IPAdapterUnifiedLoaderFaceID"
+
+    final_loader_id = assembler._get_unique_id()
+    final_loader_node = assembler._get_node_template(final_loader_template_name)
+    final_loader_node['inputs']['model'] = current_model_connection
+    final_loader_node['inputs']['preset'] = final_settings.get('final_preset', 'STANDARD (medium strength)')
+    if final_loader_type == 'FaceID':
+        final_loader_node['inputs']['lora_strength'] = final_settings.get('final_lora_strength', 0.6)
+    assembler.workflow[final_loader_id] = final_loader_node
+
+    apply_embeds_id = assembler._get_unique_id()
+    apply_embeds_node = assembler._get_node_template("IPAdapterEmbeds")
+    apply_embeds_node['inputs']['weight'] = final_settings.get('final_weight', 1.0)
+    apply_embeds_node['inputs']['embeds_scaling'] = final_settings.get('final_embeds_scaling', 'V only')
+    apply_embeds_node['inputs']['model'] = [final_loader_id, 0]
+    apply_embeds_node['inputs']['ipadapter'] = [final_loader_id, 1]
+    apply_embeds_node['inputs']['pos_embed'] = [pos_combiner_id, 0]
+    apply_embeds_node['inputs']['neg_embed'] = [neg_combiner_id, 0]
+    assembler.workflow[apply_embeds_id] = apply_embeds_node
+
+    assembler.workflow[end_node_id]['inputs']['model'] = [apply_embeds_id, 0]
+    print(f"IPAdapter injector applied. Redirected '{end_node_name}' model input through {len(chain_items)} reference images.")
chain_injectors/newbie_lora_injector.py ADDED
@@ -0,0 +1,63 @@
+from copy import deepcopy
+
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    output_map = chain_definition.get('output_map', {})
+    current_connections = {}
+    for key, type_name in output_map.items():
+        if ':' in str(key):
+            node_name, idx_str = key.split(':')
+            if node_name not in assembler.node_map:
+                print(f"Warning: [NewBie LoRA Injector] Node '{node_name}' in chain's output_map not found. Skipping.")
+                continue
+            node_id = assembler.node_map[node_name]
+            start_output_idx = int(idx_str)
+            current_connections[type_name] = [node_id, start_output_idx]
+        else:
+            print(f"Warning: [NewBie LoRA Injector] output_map key '{key}' is not in 'node:index' format. Skipping this connection.")
+
+    template_name = chain_definition.get('template')
+    if not template_name:
+        print(f"Warning: [NewBie LoRA Injector] No 'template' defined for chain. Skipping.")
+        return
+
+    for item_data in chain_items:
+        template = assembler._get_node_template(template_name)
+        node_data = deepcopy(template)
+
+        node_data['inputs']['lora_name'] = item_data.get('lora_name')
+        node_data['inputs']['strength'] = item_data.get('strength_model', 1.0)
+        node_data['inputs']['enabled'] = True
+
+        if 'model' in current_connections:
+            node_data['inputs']['model'] = current_connections['model']
+        if 'clip' in current_connections:
+            node_data['inputs']['clip'] = current_connections['clip']
+
+        new_node_id = assembler._get_unique_id()
+        assembler.workflow[new_node_id] = node_data
+
+        current_connections['model'] = [new_node_id, 0]
+        current_connections['clip'] = [new_node_id, 1]
+
+    end_input_map = chain_definition.get('end_input_map', {})
+    for type_name, targets in end_input_map.items():
+        if type_name in current_connections:
+            if not isinstance(targets, list):
+                targets = [targets]
+
+            for target_str in targets:
+                try:
+                    end_node_name, end_input_name = target_str.split(':')
+                    if end_node_name in assembler.node_map:
+                        end_node_id = assembler.node_map[end_node_name]
+                        assembler.workflow[end_node_id]['inputs'][end_input_name] = current_connections[type_name]
+                    else:
+                        print(f"Warning: [NewBie LoRA Injector] End node '{end_node_name}' for dynamic chain not found. Skipping connection.")
+                except ValueError:
+                    print(f"Warning: [NewBie LoRA Injector] Invalid target format '{target_str}' in end_input_map. Skipping.")
+
+    if chain_items:
+        print(f"NewBie LoRA injector applied. Re-routed model and clip through {len(chain_items)} LoRA(s).")
chain_injectors/reference_latent_injector.py CHANGED
@@ -2,15 +2,78 @@ def inject(assembler, chain_definition, chain_items):
     if not chain_items:
         return
 
-    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
-    flux_guidance_name = chain_definition.get('flux_guidance_node')
+    guider_node_name = chain_definition.get('guider_node')
+    guider_target_inputs = chain_definition.get('guider_target_inputs', [])
+    start_connections_map = chain_definition.get('start_connections', {})
     vae_node_name = chain_definition.get('vae_node', 'vae_loader')
 
+    if guider_node_name and guider_node_name in assembler.node_map and guider_target_inputs:
+        guider_id = assembler.node_map[guider_node_name]
+        if vae_node_name not in assembler.node_map:
+            print(f"Warning: VAE node '{vae_node_name}' not found for Guider chain. Skipping.")
+            return
+        vae_node_id = assembler.node_map[vae_node_name]
+
+        print(f"ReferenceLatent injector targeting DualCFGGuider node '{guider_node_name}'.")
+
+        current_connections = {}
+        for target_input in guider_target_inputs:
+            conn_str = start_connections_map.get(target_input)
+            if not conn_str:
+                print(f"Warning: No start connection defined for '{target_input}' in Guider chain. Skipping this input.")
+                continue
+            try:
+                node_name, idx_str = conn_str.split(':')
+                node_id = assembler.node_map[node_name]
+                current_connections[target_input] = [node_id, int(idx_str)]
+            except (ValueError, KeyError):
+                print(f"Warning: Invalid start connection '{conn_str}' for '{target_input}'. Skipping.")
+
+        encoded_latents = []
+        for i, img_filename in enumerate(chain_items):
+            load_id = assembler._get_unique_id()
+            load_node = assembler._get_node_template("LoadImage")
+            load_node['inputs']['image'] = img_filename
+            assembler.workflow[load_id] = load_node
+
+            scale_id = assembler._get_unique_id()
+            scale_node = assembler._get_node_template("ImageScaleToTotalPixels")
+            scale_node['inputs']['megapixels'] = 1.0
+            scale_node['inputs']['upscale_method'] = "lanczos"
+            scale_node['inputs']['image'] = [load_id, 0]
+            assembler.workflow[scale_id] = scale_node
+
+            vae_encode_id = assembler._get_unique_id()
+            vae_encode_node = assembler._get_node_template("VAEEncode")
+            vae_encode_node['inputs']['pixels'] = [scale_id, 0]
+            vae_encode_node['inputs']['vae'] = [vae_node_id, 0]
+            assembler.workflow[vae_encode_id] = vae_encode_node
+            encoded_latents.append([vae_encode_id, 0])
+
+        for target_input_name, start_connection in current_connections.items():
+            current_chain_head = start_connection
+            for i, latent_conn in enumerate(encoded_latents):
+                ref_latent_id = assembler._get_unique_id()
+                ref_latent_node = assembler._get_node_template("ReferenceLatent")
+                ref_latent_node['inputs']['conditioning'] = current_chain_head
+                ref_latent_node['inputs']['latent'] = latent_conn
+                ref_latent_node['_meta']['title'] = f"{target_input_name} RefLatent {i+1}"
+                assembler.workflow[ref_latent_id] = ref_latent_node
+                current_chain_head = [ref_latent_id, 0]
+
+            assembler.workflow[guider_id]['inputs'][target_input_name] = current_chain_head
+            print(f" - Input '{target_input_name}' of node '{guider_node_name}' re-routed through {len(chain_items)} reference images.")
+
+        return
+
+    flux_guidance_name = chain_definition.get('flux_guidance_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
     if ksampler_name not in assembler.node_map:
-        print(f"Warning: [ReferenceLatent] KSampler node '{ksampler_name}' not found. Skipping.")
+        print(f"Warning: KSampler node '{ksampler_name}' not found for ReferenceLatent chain. Skipping.")
         return
     if vae_node_name not in assembler.node_map:
-        print(f"Warning: [ReferenceLatent] VAE loader node '{vae_node_name}' not found. Skipping.")
+        print(f"Warning: VAE loader node '{vae_node_name}' not found for ReferenceLatent chain. Skipping.")
         return
 
     ksampler_id = assembler.node_map[ksampler_name]
@@ -23,44 +86,72 @@ def inject(assembler, chain_definition, chain_items):
     if 'conditioning' in assembler.workflow[flux_guidance_id]['inputs']:
         pos_target_node_id = flux_guidance_id
         pos_target_input_name = 'conditioning'
-        print(f"ReferenceLatent injector targeting FluxGuidance node '{flux_guidance_name}'.")
+        print(f"ReferenceLatent injector targeting FluxGuidance node '{flux_guidance_name}' for positive chain.")
 
     if not pos_target_node_id:
         if 'positive' in assembler.workflow[ksampler_id]['inputs']:
             pos_target_node_id = ksampler_id
             pos_target_input_name = 'positive'
-            print(f"ReferenceLatent injector targeting KSampler node '{ksampler_name}'.")
+            print(f"ReferenceLatent injector targeting KSampler node '{ksampler_name}' for positive chain.")
         else:
-            print(f"Warning: [ReferenceLatent] Could not find a valid positive injection point. Skipping.")
+            print(f"Warning: Could not find a valid positive injection point for ReferenceLatent chain. Skipping.")
            return
 
     current_pos_conditioning = assembler.workflow[pos_target_node_id]['inputs'][pos_target_input_name]
 
-    for i, img_filename in enumerate(chain_items):
-        if not img_filename or not isinstance(img_filename, str):
-            continue
+    neg_target_node_id = ksampler_id
+    neg_target_input_name = 'negative'
+    if 'negative' not in assembler.workflow[neg_target_node_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' has no 'negative' input. Skipping negative ReferenceLatent chain.")
+        neg_target_node_id = None
+
+    current_neg_conditioning = None
+    if neg_target_node_id:
+        current_neg_conditioning = assembler.workflow[neg_target_node_id]['inputs'][neg_target_input_name]
 
+    for i, img_filename in enumerate(chain_items):
         load_id = assembler._get_unique_id()
         load_node = assembler._get_node_template("LoadImage")
         load_node['inputs']['image'] = img_filename
+        load_node['_meta']['title'] = f"Load Reference Image {i+1}"
         assembler.workflow[load_id] = load_node
 
+        scale_id = assembler._get_unique_id()
+        scale_node = assembler._get_node_template("ImageScaleToTotalPixels")
+        scale_node['inputs']['megapixels'] = 1.0
+        scale_node['inputs']['upscale_method'] = "lanczos"
+        scale_node['inputs']['image'] = [load_id, 0]
+        scale_node['_meta']['title'] = f"Scale Reference {i+1}"
+        assembler.workflow[scale_id] = scale_node
+
         vae_encode_id = assembler._get_unique_id()
         vae_encode_node = assembler._get_node_template("VAEEncode")
-        vae_encode_node['inputs']['pixels'] = [load_id, 0]
+        vae_encode_node['inputs']['pixels'] = [scale_id, 0]
         vae_encode_node['inputs']['vae'] = [vae_node_id, 0]
+        vae_encode_node['_meta']['title'] = f"VAE Encode Reference {i+1}"
         assembler.workflow[vae_encode_id] = vae_encode_node
 
         latent_conn = [vae_encode_id, 0]
 
-        ref_latent_id = assembler._get_unique_id()
-        ref_latent_node = assembler._get_node_template("ReferenceLatent")
-        ref_latent_node['inputs']['conditioning'] = current_pos_conditioning
-        ref_latent_node['inputs']['latent'] = latent_conn
-        assembler.workflow[ref_latent_id] = ref_latent_node
-
-        current_pos_conditioning = [ref_latent_id, 0]
+        pos_ref_latent_id = assembler._get_unique_id()
+        pos_ref_latent_node = assembler._get_node_template("ReferenceLatent")
+        pos_ref_latent_node['inputs']['conditioning'] = current_pos_conditioning
+        pos_ref_latent_node['inputs']['latent'] = latent_conn
+        pos_ref_latent_node['_meta']['title'] = f"Positive ReferenceLatent {i+1}"
+        assembler.workflow[pos_ref_latent_id] = pos_ref_latent_node
+        current_pos_conditioning = [pos_ref_latent_id, 0]
+
+        if neg_target_node_id:
+            neg_ref_latent_id = assembler._get_unique_id()
+            neg_ref_latent_node = assembler._get_node_template("ReferenceLatent")
+            neg_ref_latent_node['inputs']['conditioning'] = current_neg_conditioning
+            neg_ref_latent_node['inputs']['latent'] = latent_conn
+            neg_ref_latent_node['_meta']['title'] = f"Negative ReferenceLatent {i+1}"
+            assembler.workflow[neg_ref_latent_id] = neg_ref_latent_node
+            current_neg_conditioning = [neg_ref_latent_id, 0]
 
     assembler.workflow[pos_target_node_id]['inputs'][pos_target_input_name] = current_pos_conditioning
+    if neg_target_node_id:
+        assembler.workflow[neg_target_node_id]['inputs'][neg_target_input_name] = current_neg_conditioning
 
-    print(f"ReferenceLatent injector applied. Re-routed inputs through {len(chain_items)} reference image(s).")
+    print(f"ReferenceLatent injector applied. Re-routed inputs through {len(chain_items)} reference images.")
chain_injectors/sd3_ipadapter_injector.py ADDED
@@ -0,0 +1,66 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+    if ksampler_name not in assembler.node_map:
+        print(f"Warning: KSampler node '{ksampler_name}' not found for SD3 IPAdapter chain. Skipping.")
+        return
+
+    ksampler_id = assembler.node_map[ksampler_name]
+
+    if 'model' not in assembler.workflow[ksampler_id]['inputs']:
+        print(f"Warning: KSampler node '{ksampler_name}' is missing 'model' input. Skipping SD3 IPAdapter chain.")
+        return
+
+    current_model_connection = assembler.workflow[ksampler_id]['inputs']['model']
+
+    clip_vision_loader_id = assembler._get_unique_id()
+    clip_vision_loader_node = assembler._get_node_template("CLIPVisionLoader")
+    clip_vision_loader_node['inputs']['clip_name'] = "sigclip_vision_patch14_384.safetensors"
+    assembler.workflow[clip_vision_loader_id] = clip_vision_loader_node
+
+    ipadapter_loader_id = assembler._get_unique_id()
+    ipadapter_loader_node = assembler._get_node_template("IPAdapterSD3Loader")
+    ipadapter_loader_node['inputs']['ipadapter'] = "ip-adapter_sd35l_instantx.bin"
+    ipadapter_loader_node['inputs']['provider'] = "cuda"
+    assembler.workflow[ipadapter_loader_id] = ipadapter_loader_node
+
+    for item_data in chain_items:
+        image_loader_id = assembler._get_unique_id()
+        image_loader_node = assembler._get_node_template("LoadImage")
+        image_loader_node['inputs']['image'] = item_data['image']
+        assembler.workflow[image_loader_id] = image_loader_node
+
+        image_scaler_id = assembler._get_unique_id()
+        image_scaler_node = assembler._get_node_template("ImageScaleToTotalPixels")
+        image_scaler_node['inputs']['image'] = [image_loader_id, 0]
+        image_scaler_node['inputs']['upscale_method'] = 'nearest-exact'
+        image_scaler_node['inputs']['megapixels'] = 1.0
+        assembler.workflow[image_scaler_id] = image_scaler_node
+
+        clip_vision_encode_id = assembler._get_unique_id()
+        clip_vision_encode_node = assembler._get_node_template("CLIPVisionEncode")
+        clip_vision_encode_node['inputs']['crop'] = "center"
+        clip_vision_encode_node['inputs']['clip_vision'] = [clip_vision_loader_id, 0]
+        clip_vision_encode_node['inputs']['image'] = [image_scaler_id, 0]
+        assembler.workflow[clip_vision_encode_id] = clip_vision_encode_node
+
+        apply_ipa_id = assembler._get_unique_id()
+        apply_ipa_node = assembler._get_node_template("ApplyIPAdapterSD3")
+
+        apply_ipa_node['inputs']['weight'] = item_data.get('weight', 1.0)
+        apply_ipa_node['inputs']['start_percent'] = item_data.get('start_percent', 0.0)
+        apply_ipa_node['inputs']['end_percent'] = item_data.get('end_percent', 1.0)
+
+        apply_ipa_node['inputs']['model'] = current_model_connection
+        apply_ipa_node['inputs']['ipadapter'] = [ipadapter_loader_id, 0]
+        apply_ipa_node['inputs']['image_embed'] = [clip_vision_encode_id, 0]
+
+        assembler.workflow[apply_ipa_id] = apply_ipa_node
+
+        current_model_connection = [apply_ipa_id, 0]
+
+    assembler.workflow[ksampler_id]['inputs']['model'] = current_model_connection
+
+    print(f"SD3 IPAdapter injector applied. KSampler model input re-routed through {len(chain_items)} IPAdapter(s).")
chain_injectors/style_injector.py ADDED
@@ -0,0 +1,71 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    flux_guidance_name = chain_definition.get('flux_guidance_node')
+    ksampler_name = chain_definition.get('ksampler_node', 'ksampler')
+
+    target_node_id = None
+    target_input_name = None
+
+    if flux_guidance_name and flux_guidance_name in assembler.node_map:
+        flux_guidance_id = assembler.node_map[flux_guidance_name]
+        if 'conditioning' in assembler.workflow[flux_guidance_id]['inputs']:
+            target_node_id = flux_guidance_id
+            target_input_name = 'conditioning'
+
+    if not target_node_id:
+        if ksampler_name in assembler.node_map:
+            ksampler_id = assembler.node_map[ksampler_name]
+            if 'positive' in assembler.workflow[ksampler_id]['inputs']:
+                target_node_id = ksampler_id
+                target_input_name = 'positive'
+        else:
+            return
+
+    if not target_node_id:
+        return
+
+    current_conditioning = assembler.workflow[target_node_id]['inputs'][target_input_name]
+
+    style_model_loader_id = assembler._get_unique_id()
+    style_model_loader_node = assembler._get_node_template("StyleModelLoader")
+    style_model_loader_node['inputs']['style_model_name'] = "flux1-redux-dev.safetensors"
+    assembler.workflow[style_model_loader_id] = style_model_loader_node
+
+    clip_vision_loader_id = assembler._get_unique_id()
+    clip_vision_loader_node = assembler._get_node_template("CLIPVisionLoader")
+    clip_vision_loader_node['inputs']['clip_name'] = "sigclip_vision_patch14_384.safetensors"
+    assembler.workflow[clip_vision_loader_id] = clip_vision_loader_node
+
+    for item_data in chain_items:
+        image = item_data.get('image')
+        strength = item_data.get('strength', 1.0)
+        if not image or strength is None:
+            continue
+
+        load_image_id = assembler._get_unique_id()
+        clip_vision_encode_id = assembler._get_unique_id()
+        style_apply_id = assembler._get_unique_id()
+
+        load_image_node = assembler._get_node_template("LoadImage")
+        clip_vision_encode_node = assembler._get_node_template("CLIPVisionEncode")
+        style_apply_node = assembler._get_node_template("StyleModelApply")
+
+        load_image_node['inputs']['image'] = image
+        clip_vision_encode_node['inputs']['crop'] = "center"
+        clip_vision_encode_node['inputs']['clip_vision'] = [clip_vision_loader_id, 0]
+        clip_vision_encode_node['inputs']['image'] = [load_image_id, 0]
+
+        style_apply_node['inputs']['strength'] = strength
+        style_apply_node['inputs']['strength_type'] = "multiply"
+        style_apply_node['inputs']['conditioning'] = current_conditioning
+        style_apply_node['inputs']['style_model'] = [style_model_loader_id, 0]
+        style_apply_node['inputs']['clip_vision_output'] = [clip_vision_encode_id, 0]
+
+        assembler.workflow[load_image_id] = load_image_node
+        assembler.workflow[clip_vision_encode_id] = clip_vision_encode_node
+        assembler.workflow[style_apply_id] = style_apply_node
+        current_conditioning = [style_apply_id, 0]
+
+    assembler.workflow[target_node_id]['inputs'][target_input_name] = current_conditioning
chain_injectors/vae_injector.py ADDED
@@ -0,0 +1,30 @@
+def inject(assembler, chain_definition, chain_items):
+    if not chain_items:
+        return
+
+    vae_name = chain_items[0] if isinstance(chain_items, list) else chain_items
+    if not vae_name or vae_name == "None":
+        return
+
+    targets = chain_definition.get('targets', [])
+    if not targets:
+        return
+
+    vae_loader_id = assembler._get_unique_id()
+    vae_loader_node = assembler._get_node_template("VAELoader")
+    vae_loader_node['inputs']['vae_name'] = vae_name
+    assembler.workflow[vae_loader_id] = vae_loader_node
+
+    injected_count = 0
+    for target_str in targets:
+        try:
+            node_name, input_name = target_str.split(':')
+            if node_name in assembler.node_map:
+                node_id = assembler.node_map[node_name]
+                assembler.workflow[node_id]['inputs'][input_name] = [vae_loader_id, 0]
+                injected_count += 1
+        except ValueError:
+            print(f"Warning: Invalid VAE injector target format '{target_str}'. Expected 'node_name:input_name'.")
+
+    if injected_count > 0:
+        print(f"VAE injector applied. Rerouted {injected_count} connection(s) to new VAELoader ({vae_name}).")
comfy_integration/nodes.py CHANGED
@@ -23,6 +23,11 @@ CLIPTextEncodeSDXL = NODE_CLASS_MAPPINGS['CLIPTextEncodeSDXL']
 LoraLoader = NODE_CLASS_MAPPINGS['LoraLoader']
 CLIPSetLastLayer = NODE_CLASS_MAPPINGS['CLIPSetLastLayer']
 
+if 'EmptyHunyuanImageLatent' in NODE_CLASS_MAPPINGS:
+    EmptyHunyuanImageLatent = NODE_CLASS_MAPPINGS['EmptyHunyuanImageLatent']
+else:
+    print("⚠️ Warning: 'EmptyHunyuanImageLatent' not found in NODE_CLASS_MAPPINGS. HunyuanImage txt2img may fail if this node is required.")
+
 try:
     KSamplerNode = NODE_CLASS_MAPPINGS['KSampler']
     SAMPLER_CHOICES = KSamplerNode.INPUT_TYPES()["required"]["sampler_name"][0]
comfy_integration/setup.py CHANGED
@@ -39,14 +39,40 @@ def initialize_comfyui():
     except OSError as e:
         print(f"⚠️ Could not remove temporary directory '{COMFYUI_TEMP_DIR}': {e}")
 
+
     print("--- Cloning third-party extensions for ComfyUI ---")
-    controlnet_aux_path = os.path.join(APP_DIR, "custom_nodes", "comfyui_controlnet_aux")
-    if not os.path.exists(controlnet_aux_path):
-        os.system(f"git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git {controlnet_aux_path}")
-        print("✅ comfyui_controlnet_aux extension cloned.")
+
+    # 1. ComfyUI_IPAdapter_plus
+    ipadapter_plus_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI_IPAdapter_plus")
+    if not os.path.exists(ipadapter_plus_path):
+        os.system(f"git clone https://github.com/cubiq/ComfyUI_IPAdapter_plus.git {ipadapter_plus_path}")
+        print("✅ ComfyUI_IPAdapter_plus extension cloned.")
     else:
-        print("✅ comfyui_controlnet_aux extension already exists.")
+        print("✅ ComfyUI_IPAdapter_plus extension already exists.")
+
+    # 2. ComfyUI-InstantX-IPAdapter-SD3
+    ipadapter_plus_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-InstantX-IPAdapter-SD3")
+    if not os.path.exists(ipadapter_plus_path):
+        os.system(f"git clone https://github.com/Slickytail/ComfyUI-InstantX-IPAdapter-SD3.git {ipadapter_plus_path}")
+        print("✅ ComfyUI-InstantX-IPAdapter-SD3 extension cloned.")
+    else:
+        print("✅ ComfyUI-InstantX-IPAdapter-SD3 extension already exists.")
+
+    # 3. ComfyUI-IPAdapter-Flux
+    ipadapter_flux_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-IPAdapter-Flux")
+    if not os.path.exists(ipadapter_flux_path):
+        os.system(f"git clone https://github.com/Shakker-Labs/ComfyUI-IPAdapter-Flux.git {ipadapter_flux_path}")
+        print("✅ ComfyUI-IPAdapter-Flux extension cloned.")
+    else:
+        print("✅ ComfyUI-IPAdapter-Flux extension already exists.")
+
+    # 4. ComfyUI-Newbie-Nodes
+    newbie_nodes_path = os.path.join(APP_DIR, "custom_nodes", "ComfyUI-Newbie-Nodes")
+    if not os.path.exists(newbie_nodes_path):
+        os.system(f"git clone https://github.com/NewBieAI-Lab/ComfyUI-Newbie-Nodes.git {newbie_nodes_path}")
+        print("✅ ComfyUI-Newbie-Nodes extension cloned.")
+    else:
+        print("✅ ComfyUI-Newbie-Nodes extension already exists.")
 
     print(f"✅ Current working directory is: {os.getcwd()}")
 
@@ -55,13 +81,10 @@ def initialize_comfyui():
 
     print("✅ ComfyUI initialized with default attention mechanism.")
 
-    os.makedirs(os.path.join(APP_DIR, CHECKPOINT_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, LORA_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, EMBEDDING_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, CONTROLNET_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, MODEL_PATCHES_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, DIFFUSION_MODELS_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, VAE_DIR), exist_ok=True)
-    os.makedirs(os.path.join(APP_DIR, TEXT_ENCODERS_DIR), exist_ok=True)
+    for dir_path in CATEGORY_TO_DIR_MAP.values():
+        os.makedirs(os.path.join(APP_DIR, dir_path), exist_ok=True)
+
     os.makedirs(os.path.join(APP_DIR, INPUT_DIR), exist_ok=True)
+    os.makedirs(os.path.join(APP_DIR, OUTPUT_DIR), exist_ok=True)
+
     print("✅ All required model directories are present.")
core/generation_logic.py CHANGED
@@ -1,25 +1,10 @@
 from typing import Any, Dict
 import gradio as gr
 
-from core.pipelines.controlnet_preprocessor import ControlNetPreprocessorPipeline
 from core.pipelines.sd_image_pipeline import SdImagePipeline
 
-controlnet_preprocessor_pipeline = ControlNetPreprocessorPipeline()
 sd_image_pipeline = SdImagePipeline()
 
 
-def build_reverse_map():
-    from nodes import NODE_DISPLAY_NAME_MAPPINGS
-    import core.pipelines.controlnet_preprocessor as cn_module
-
-    if cn_module.REVERSE_DISPLAY_NAME_MAP is None:
-        cn_module.REVERSE_DISPLAY_NAME_MAP = {v: k for k, v in NODE_DISPLAY_NAME_MAPPINGS.items()}
-        if "Semantic Segmentor (legacy, alias for UniFormer)" not in cn_module.REVERSE_DISPLAY_NAME_MAP:
-            cn_module.REVERSE_DISPLAY_NAME_MAP["Semantic Segmentor (legacy, alias for UniFormer)"] = "SemSegPreprocessor"
-
-
-def run_cn_preprocessor_entry(*args, **kwargs):
-    return controlnet_preprocessor_pipeline.run(*args, **kwargs)
-
 def generate_image_wrapper(ui_inputs: dict, progress=gr.Progress(track_tqdm=True)):
     return sd_image_pipeline.run(ui_inputs=ui_inputs, progress=progress)
core/model_manager.py CHANGED
@@ -1,9 +1,8 @@
 import gc
 from typing import List
 import gradio as gr
-
-from core.settings import ALL_MODEL_MAP
 from utils.app_utils import _ensure_model_downloaded
+from core.settings import ALL_MODEL_MAP
 
 class ModelManager:
     _instance = None
@@ -21,25 +20,13 @@ class ModelManager:
 
     def ensure_models_downloaded(self, required_models: List[str], progress):
         print(f"--- [ModelManager] Ensuring models are downloaded: {required_models} ---")
-
-        files_to_download = set()
-        for display_name in required_models:
-            if display_name in ALL_MODEL_MAP:
-                _, components, _, _ = ALL_MODEL_MAP[display_name]
-                for component_file in components.values():
-                    files_to_download.add(component_file)
-
-        files_to_download = list(files_to_download)
-        total_files = len(files_to_download)
-
-        for i, filename in enumerate(files_to_download):
+        for i, display_name in enumerate(required_models):
             if progress and hasattr(progress, '__call__'):
-                progress(i / total_files if total_files > 0 else 0, desc=f"Checking file: {filename}")
+                progress(i / max(len(required_models), 1), desc=f"Checking file: {display_name}")
             try:
-                _ensure_model_downloaded(filename, progress)
+                _ensure_model_downloaded(display_name, progress)
             except Exception as e:
-                raise gr.Error(f"Failed to download model component '{filename}'. Reason: {e}")
-
+                raise gr.Error(f"Failed to download model '{display_name}'. Reason: {e}")
         print(f"--- [ModelManager] ✅ All required models are present on disk. ---")
-
+
 model_manager = ModelManager()
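Note on the simplified loop above: `ensure_models_downloaded` now treats each entry of `required_models` as a directly resolvable name (the component expansion moved into `SdImagePipeline.get_required_models`) and emits one fractional progress tick per entry. A minimal sketch of the progress semantics, where `download` and `report` are hypothetical stand-ins for `_ensure_model_downloaded` and Gradio's progress callback:

    # Sketch only: mirrors the new per-name loop; the callables are stubs, not repo code.
    def ensure_downloaded(required_models, report, download):
        for i, display_name in enumerate(required_models):
            # max(..., 1) keeps the fraction well-defined for an empty list
            report(i / max(len(required_models), 1), desc=f"Checking file: {display_name}")
            download(display_name)

    if __name__ == "__main__":
        seen = []
        ensure_downloaded(
            ["model_a.safetensors", "model_b.safetensors"],
            report=lambda frac, desc: seen.append((round(frac, 2), desc)),
            download=lambda name: None,  # pretend every file is already on disk
        )
        print(seen)  # [(0.0, 'Checking file: model_a...'), (0.5, 'Checking file: model_b...')]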
core/pipelines/controlnet_preprocessor.py DELETED
@@ -1,143 +0,0 @@
-from typing import Dict, Any, List
-import imageio
-import tempfile
-import numpy as np
-import torch
-import gradio as gr
-from PIL import Image
-import spaces
-
-from .base_pipeline import BasePipeline
-from comfy_integration.nodes import NODE_CLASS_MAPPINGS
-from nodes import NODE_DISPLAY_NAME_MAPPINGS
-from utils.app_utils import get_value_at_index
-
-REVERSE_DISPLAY_NAME_MAP = None
-CPU_ONLY_PREPROCESSORS = {
-    "Binary Lines", "Canny Edge", "Color Pallete", "Fake Scribble Lines (aka scribble_hed)",
-    "Image Intensity", "Image Luminance", "Inpaint Preprocessor", "PyraCanny", "Scribble Lines",
-    "Scribble XDoG Lines", "Standard Lineart", "Content Shuffle", "Tile"
-}
-
-def run_node_by_function_name(node_instance: Any, **kwargs) -> Any:
-    node_class = type(node_instance)
-    function_name = getattr(node_class, 'FUNCTION', None)
-    if not function_name:
-        raise AttributeError(f"Node class '{node_class.__name__}' is missing the required 'FUNCTION' attribute.")
-    execution_method = getattr(node_instance, function_name, None)
-    if not callable(execution_method):
-        raise AttributeError(f"Method '{function_name}' not found or not callable on node '{node_class.__name__}'.")
-    return execution_method(**kwargs)
-
-class ControlNetPreprocessorPipeline(BasePipeline):
-    def get_required_models(self, **kwargs) -> List[str]:
-        return []
-
-    def _gpu_logic(
-        self, pil_images: List[Image.Image], preprocessor_name: str, model_name: str,
-        params: Dict[str, Any], progress=gr.Progress(track_tqdm=True)
-    ) -> List[Image.Image]:
-        global REVERSE_DISPLAY_NAME_MAP
-        if REVERSE_DISPLAY_NAME_MAP is None:
-            raise RuntimeError("REVERSE_DISPLAY_NAME_MAP has not been initialized. `build_reverse_map` must be called on startup.")
-
-        class_name = REVERSE_DISPLAY_NAME_MAP.get(preprocessor_name)
-        if not class_name or class_name not in NODE_CLASS_MAPPINGS:
-            raise ValueError(f"Preprocessor '{preprocessor_name}' not found.")
-
-        preprocessor_instance = NODE_CLASS_MAPPINGS[class_name]()
-        call_args = {**params, 'ckpt_name': model_name}
-
-        processed_pil_images = []
-        total_frames = len(pil_images)
-
-        for i, frame_pil in enumerate(pil_images):
-            progress(i / total_frames, desc=f"Processing frame {i+1}/{total_frames} with {preprocessor_name}...")
-
-            frame_tensor = torch.from_numpy(np.array(frame_pil).astype(np.float32) / 255.0).unsqueeze(0)
-
-            resolution_arg = {'resolution': max(frame_tensor.shape[2], frame_tensor.shape[3])}
-
-            result_tuple = run_node_by_function_name(
-                preprocessor_instance,
-                image=frame_tensor,
-                **resolution_arg,
-                **call_args
-            )
-
-            processed_tensor = get_value_at_index(result_tuple, 0)
-            processed_np = (processed_tensor.squeeze(0).cpu().numpy().clip(0, 1) * 255.0).astype(np.uint8)
-            processed_pil_images.append(Image.fromarray(processed_np))
-
-        return processed_pil_images
-
-    def run(self, input_type, image_input, video_input, preprocessor_name, model_name, zero_gpu_duration, *args, progress=gr.Progress(track_tqdm=True)):
-        from utils import app_utils
-        pil_images, is_video, fps = [], False, 30
-
-        progress(0, desc="Reading input file...")
-        if input_type == "Image":
-            if image_input is None: raise gr.Error("Please provide an input image.")
-            pil_images = [image_input]
-        elif input_type == "Video":
-            if video_input is None: raise gr.Error("Please provide an input video.")
-            try:
-                video_reader = imageio.get_reader(video_input)
-                meta = video_reader.get_meta_data()
-                fps = meta.get('fps', 30)
-                pil_images = [Image.fromarray(frame) for frame in video_reader]
-                is_video = True
-                video_reader.close()
-            except Exception as e: raise gr.Error(f"Failed to read video file: {e}")
-        else:
-            raise gr.Error("Invalid input type selected.")
-
-        if not pil_images: raise gr.Error("Could not extract any frames from the input.")
-
-        if app_utils.PREPROCESSOR_PARAMETER_MAP is None:
-            raise RuntimeError("Preprocessor parameter map is not built. Check startup logs.")
-
-        params_config = app_utils.PREPROCESSOR_PARAMETER_MAP.get(preprocessor_name, [])
-        sliders_params = [p for p in params_config if p['type'] in ["INT", "FLOAT"]]
-        dropdown_params = [p for p in params_config if isinstance(p['type'], list)]
-        checkbox_params = [p for p in params_config if p['type'] == "BOOLEAN"]
-        ordered_params_config = sliders_params + dropdown_params + checkbox_params
-        param_names = [p['name'] for p in ordered_params_config]
-        provided_params = {param_names[i]: args[i] for i in range(len(param_names))}
-
-        if preprocessor_name not in CPU_ONLY_PREPROCESSORS:
-            print(f"--- '{preprocessor_name}' requires GPU, requesting ZeroGPU. ---")
-            try:
-                processed_pil_images = self._execute_gpu_logic(
-                    self._gpu_logic,
-                    duration=zero_gpu_duration,
-                    default_duration=60,
-                    task_name=f"Preprocessor '{preprocessor_name}'",
-                    pil_images=pil_images,
-                    preprocessor_name=preprocessor_name,
-                    model_name=model_name,
-                    params=provided_params,
-                    progress=progress
-                )
-            except Exception as e:
-                import traceback; traceback.print_exc()
-                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on GPU: {e}")
-        else:
-            print(f"--- Running '{preprocessor_name}' on CPU, no ZeroGPU requested. ---")
-            try:
-                processed_pil_images = self._gpu_logic(pil_images, preprocessor_name, model_name, provided_params, progress=progress)
-            except Exception as e:
-                import traceback; traceback.print_exc()
-                raise gr.Error(f"Failed to run preprocessor '{preprocessor_name}' on CPU: {e}")
-
-        if not processed_pil_images: raise gr.Error("Processing returned no frames.")
-
-        progress(0.9, desc="Finalizing output...")
-        if is_video:
-            frames_np = [np.array(img) for img in processed_pil_images]
-            frames_tensor = torch.from_numpy(np.stack(frames_np)).to(torch.float32) / 255.0
-            video_path = self._encode_video_from_frames(frames_tensor, fps, progress)
-            return [video_path]
-        else:
-            progress(1.0, desc="Done!")
-            return processed_pil_images
core/pipelines/sd_image_pipeline.py CHANGED
@@ -11,12 +11,20 @@
 from .base_pipeline import BasePipeline
 from core.settings import *
 from comfy_integration.nodes import *
-from utils.app_utils import get_value_at_index, sanitize_prompt, get_lora_path, get_embedding_path, ensure_controlnet_model_downloaded, sanitize_filename
+from utils.app_utils import get_value_at_index, sanitize_prompt, get_lora_path, get_embedding_path, ensure_controlnet_model_downloaded, ensure_ipadapter_models_downloaded, sanitize_filename
 from core.workflow_assembler import WorkflowAssembler
 
 class SdImagePipeline(BasePipeline):
     def get_required_models(self, model_display_name: str, **kwargs) -> List[str]:
-        return [model_display_name]
+        model_info = ALL_MODEL_MAP.get(model_display_name)
+        if not model_info:
+            return [model_display_name]
+
+        path_or_components = model_info[1]
+        if isinstance(path_or_components, dict):
+            return [v for v in path_or_components.values() if v and v != "pixel_space"]
+        else:
+            return [model_display_name]
 
     def _topological_sort(self, workflow: Dict[str, Any]) -> List[str]:
         graph = defaultdict(list)
@@ -47,7 +55,6 @@ class SdImagePipeline(BasePipeline):
 
         return sorted_nodes
 
-
     def _execute_workflow(self, workflow: Dict[str, Any], initial_objects: Dict[str, Any]):
         with torch.no_grad():
             computed_outputs = initial_objects
@@ -119,7 +126,7 @@
         progress(0.4, desc="Executing workflow...")
 
         initial_objects = {}
-
+
         decoded_images_tensor = self._execute_workflow(workflow, initial_objects=initial_objects)
 
         output_images = []
@@ -135,6 +142,7 @@
         params_string = f"{ui_inputs['positive_prompt']}\nNegative prompt: {ui_inputs['negative_prompt']}\n"
         params_string += f"Steps: {ui_inputs['num_inference_steps']}, Sampler: {ui_inputs['sampler']}, Scheduler: {ui_inputs['scheduler']}, CFG scale: {ui_inputs['guidance_scale']}, Seed: {current_seed}, Size: {width_for_meta}x{height_for_meta}, Base Model: {model_display_name}"
         if ui_inputs['task_type'] != 'txt2img': params_string += f", Denoise: {ui_inputs['denoise']}"
+        if ui_inputs.get('clip_skip') and ui_inputs['clip_skip'] != 1: params_string += f", Clip skip: {abs(ui_inputs['clip_skip'])}"
         if loras_string: params_string += f", {loras_string}"
 
         pil_image.info = {'parameters': params_string.strip()}
@@ -146,39 +154,46 @@
        progress(0, desc="Preparing models...")
 
        task_type = ui_inputs['task_type']
+       model_display_name = ui_inputs['model_display_name']
+       model_type = MODEL_TYPE_MAP.get(model_display_name, 'sdxl')
+
+       architectures_dict = ARCHITECTURES_CONFIG.get('architectures', {})
+       workflow_model_type = architectures_dict.get(model_type, {}).get("model_type", "sdxl")
 
        ui_inputs['positive_prompt'] = sanitize_prompt(ui_inputs.get('positive_prompt', ''))
        ui_inputs['negative_prompt'] = sanitize_prompt(ui_inputs.get('negative_prompt', ''))
 
-       required_models = self.get_required_models(model_display_name=ui_inputs['model_display_name'])
-
+       if 'clip_skip' in ui_inputs and ui_inputs['clip_skip'] is not None:
+           ui_inputs['clip_skip'] = -int(ui_inputs['clip_skip'])
+       else:
+           ui_inputs['clip_skip'] = -1
+
+       required_models = self.get_required_models(model_display_name=model_display_name)
        self.model_manager.ensure_models_downloaded(required_models, progress=progress)
 
        lora_data = ui_inputs.get('lora_data', [])
        active_loras_for_gpu, active_loras_for_meta = [], []
        if lora_data:
            sources, ids, scales, files = lora_data[0::4], lora_data[1::4], lora_data[2::4], lora_data[3::4]
-
            for i, (source, lora_id, scale, _) in enumerate(zip(sources, ids, scales, files)):
                if scale > 0 and lora_id and lora_id.strip():
                    lora_filename = None
                    if source == "File":
                        lora_filename = sanitize_filename(lora_id)
                    elif source == "Civitai":
-                       local_path, status = get_lora_path(source, lora_id, ui_inputs['civitai_api_key'], progress)
+                       local_path, status = get_lora_path(source, lora_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                        if local_path: lora_filename = os.path.basename(local_path)
                        else: raise gr.Error(f"Failed to prepare LoRA {lora_id}: {status}")
 
                    if lora_filename:
                        active_loras_for_gpu.append({"lora_name": lora_filename, "strength_model": scale, "strength_clip": scale})
                        active_loras_for_meta.append(f"{source} {lora_id}:{scale}")
-
+
        ui_inputs['denoise'] = 1.0
        if task_type == 'img2img': ui_inputs['denoise'] = ui_inputs.get('img2img_denoise', 0.7)
        elif task_type == 'hires_fix': ui_inputs['denoise'] = ui_inputs.get('hires_denoise', 0.55)
 
        temp_files_to_clean = []
-
        if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
 
        if task_type == 'img2img':
@@ -197,7 +212,6 @@
                raise gr.Error("Inpainting requires an input image and a drawn mask.")
 
            background_img = inpaint_dict['background'].convert("RGBA")
-
            composite_mask_pil = Image.new('L', background_img.size, 0)
            for layer in inpaint_dict['layers']:
                if layer:
@@ -211,7 +225,7 @@
            temp_file_path = os.path.join(INPUT_DIR, f"temp_inpaint_composite_{random.randint(1000, 9999)}.png")
            composite_image_with_mask.save(temp_file_path, "PNG")
 
-           ui_inputs['inpaint_image'] = os.path.basename(temp_file_path)
+           ui_inputs['input_image'] = os.path.basename(temp_file_path)
            temp_files_to_clean.append(temp_file_path)
            ui_inputs.pop('inpaint_mask', None)
 
@@ -222,6 +236,9 @@
            input_image_pil.save(temp_file_path, "PNG")
            ui_inputs['input_image'] = os.path.basename(temp_file_path)
            temp_files_to_clean.append(temp_file_path)
+
+           ui_inputs['megapixels'] = 0.25
+           ui_inputs['grow_mask_by'] = ui_inputs.get('feathering', 10)
 
        elif task_type == 'hires_fix':
            input_image_pil = ui_inputs.get('hires_image')
@@ -241,7 +258,7 @@
            if source == "File":
                emb_filename = sanitize_filename(emb_id)
            elif source == "Civitai":
-               local_path, status = get_embedding_path(source, emb_id, ui_inputs['civitai_api_key'], progress)
+               local_path, status = get_embedding_path(source, emb_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                if local_path: emb_filename = os.path.basename(local_path)
                else: raise gr.Error(f"Failed to prepare Embedding {emb_id}: {status}")
 
@@ -255,20 +272,162 @@
        else:
            ui_inputs['positive_prompt'] = embedding_prompt_text
 
+       controlnet_data = ui_inputs.get('controlnet_data', [])
+       active_controlnets = []
+       if controlnet_data:
+           (cn_images, _, _, cn_strengths, cn_filepaths) = [controlnet_data[i::5] for i in range(5)]
+           for i in range(len(cn_images)):
+               if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
+                   ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
+                   if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                   cn_temp_path = os.path.join(INPUT_DIR, f"temp_cn_{i}_{random.randint(1000, 9999)}.png")
+                   cn_images[i].save(cn_temp_path, "PNG")
+                   temp_files_to_clean.append(cn_temp_path)
+                   active_controlnets.append({
+                       "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
+                       "start_percent": 0.0, "end_percent": 1.0, "control_net_name": cn_filepaths[i]
+                   })
+
+       diffsynth_controlnet_data = ui_inputs.get('diffsynth_controlnet_data', [])
+       active_diffsynth_controlnets = []
+       if diffsynth_controlnet_data:
+           (cn_images, _, _, cn_strengths, cn_filepaths) = [diffsynth_controlnet_data[i::5] for i in range(5)]
+           for i in range(len(cn_images)):
+               if cn_images[i] and cn_strengths[i] > 0 and cn_filepaths[i] and cn_filepaths[i] != "None":
+                   ensure_controlnet_model_downloaded(cn_filepaths[i], progress)
+
+                   if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                   cn_temp_path = os.path.join(INPUT_DIR, f"temp_diffsynth_cn_{i}_{random.randint(1000, 9999)}.png")
+                   cn_images[i].save(cn_temp_path, "PNG")
+                   temp_files_to_clean.append(cn_temp_path)
+                   active_diffsynth_controlnets.append({
+                       "image": os.path.basename(cn_temp_path), "strength": cn_strengths[i],
+                       "control_net_name": cn_filepaths[i]
+                   })
+
+       ipadapter_data = ui_inputs.get('ipadapter_data', [])
+       active_ipadapters = []
+       if ipadapter_data:
+           num_ipa_units = (len(ipadapter_data) - 5) // 3
+           final_preset, final_weight, final_lora_strength, final_embeds_scaling, final_combine_method = ipadapter_data[-5:]
+           ipa_images, ipa_weights, ipa_lora_strengths = [ipadapter_data[i*num_ipa_units:(i+1)*num_ipa_units] for i in range(3)]
+           all_presets_to_download = set()
+           for i in range(num_ipa_units):
+               if ipa_images[i] and ipa_weights[i] > 0 and final_preset:
+                   all_presets_to_download.add(final_preset)
+                   if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                   ipa_temp_path = os.path.join(INPUT_DIR, f"temp_ipa_{i}_{random.randint(1000, 9999)}.png")
+                   ipa_images[i].save(ipa_temp_path, "PNG")
+                   temp_files_to_clean.append(ipa_temp_path)
+                   active_ipadapters.append({
+                       "image": os.path.basename(ipa_temp_path), "preset": final_preset,
+                       "weight": ipa_weights[i], "lora_strength": ipa_lora_strengths[i]
+                   })
+           if active_ipadapters and final_preset:
+               all_presets_to_download.add(final_preset)
+           for preset in all_presets_to_download:
+               ensure_ipadapter_models_downloaded(preset, progress)
+
+           model_type_key = 'sd15' if workflow_model_type == 'sd15' else 'sdxl'
+           if active_ipadapters:
+               active_ipadapters.append({
+                   'is_final_settings': True, 'model_type': model_type_key, 'final_preset': final_preset,
+                   'final_weight': final_weight, 'final_lora_strength': final_lora_strength,
+                   'final_embeds_scaling': final_embeds_scaling, 'final_combine_method': final_combine_method
+               })
+
+       flux1_ipadapter_data = ui_inputs.get('flux1_ipadapter_data', [])
+       active_flux1_ipadapters = []
+       if flux1_ipadapter_data:
+           num_units = len(flux1_ipadapter_data) // 4
+           f_images = flux1_ipadapter_data[0*num_units : 1*num_units]
+           f_weights = flux1_ipadapter_data[1*num_units : 2*num_units]
+           f_starts = flux1_ipadapter_data[2*num_units : 3*num_units]
+           f_ends = flux1_ipadapter_data[3*num_units : 4*num_units]
+           for i in range(len(f_images)):
+               if f_images[i] and f_weights[i] > 0:
+                   from utils.app_utils import _ensure_model_downloaded
+                   for filename in ["ip-adapter.bin"]:
+                       _ensure_model_downloaded(filename, progress)
+
+                   from huggingface_hub import snapshot_download
+                   progress(0.5, desc="Caching HF SigLIP model...")
+                   snapshot_download(
+                       repo_id="google/siglip-so400m-patch14-384",
+                       allow_patterns=["*.json", "*.safetensors", "*.txt"],
+                       ignore_patterns=["*.msgpack", "*.h5", "*.bin"]
+                   )
+
+                   temp_path = os.path.join(INPUT_DIR, f"temp_fipa_{i}_{random.randint(1000, 9999)}.png")
+                   f_images[i].save(temp_path, "PNG")
+                   temp_files_to_clean.append(temp_path)
+                   active_flux1_ipadapters.append({
+                       "image": os.path.basename(temp_path),
+                       "weight": f_weights[i], "start_percent": f_starts[i], "end_percent": f_ends[i]
+                   })
+
+       sd3_ipadapter_data = ui_inputs.get('sd3_ipadapter_chain', [])
+       active_sd3_ipadapters = []
+       if sd3_ipadapter_data:
+           num_units = len(sd3_ipadapter_data) // 4
+           s_images = sd3_ipadapter_data[0*num_units : 1*num_units]
+           s_weights = sd3_ipadapter_data[1*num_units : 2*num_units]
+           s_starts = sd3_ipadapter_data[2*num_units : 3*num_units]
+           s_ends = sd3_ipadapter_data[3*num_units : 4*num_units]
+           sd3_ipa_downloaded = False
+           for i in range(len(s_images)):
+               if s_images[i] and s_weights[i] > 0:
+                   if not sd3_ipa_downloaded:
+                       from utils.app_utils import ensure_sd3_ipadapter_models_downloaded
+                       ensure_sd3_ipadapter_models_downloaded(progress)
+                       sd3_ipa_downloaded = True
+                   temp_path = os.path.join(INPUT_DIR, f"temp_s3ipa_{i}_{random.randint(1000, 9999)}.png")
+                   s_images[i].save(temp_path, "PNG")
+                   temp_files_to_clean.append(temp_path)
+                   active_sd3_ipadapters.append({
+                       "image": os.path.basename(temp_path),
+                       "weight": s_weights[i], "start_percent": s_starts[i], "end_percent": s_ends[i]
+                   })
+
+       style_data = ui_inputs.get('style_data', [])
+       active_styles = []
+       if style_data:
+           num_units = len(style_data) // 2
+           st_images = style_data[0*num_units : 1*num_units]
+           st_strengths = style_data[1*num_units : 2*num_units]
+           for i in range(len(st_images)):
+               if st_images[i] and st_strengths[i] > 0:
+                   from utils.app_utils import _ensure_model_downloaded
+                   _ensure_model_downloaded("sigclip_vision_patch14_384.safetensors", progress)
+                   temp_path = os.path.join(INPUT_DIR, f"temp_style_{i}_{random.randint(1000, 9999)}.png")
+                   st_images[i].save(temp_path, "PNG")
+                   temp_files_to_clean.append(temp_path)
+                   active_styles.append({
+                       "image": os.path.basename(temp_path), "strength": st_strengths[i]
+                   })
+
+       reference_latent_data = ui_inputs.get('reference_latent_data', [])
+       active_reference_latents = []
+       if reference_latent_data:
+           for img in reference_latent_data:
+               if img:
+                   if not os.path.exists(INPUT_DIR): os.makedirs(INPUT_DIR)
+                   temp_path = os.path.join(INPUT_DIR, f"temp_ref_{random.randint(1000, 9999)}.png")
+                   img.save(temp_path, "PNG")
+                   temp_files_to_clean.append(temp_path)
+                   active_reference_latents.append(os.path.basename(temp_path))
+
        from utils.app_utils import get_vae_path
        vae_source = ui_inputs.get('vae_source')
        vae_id = ui_inputs.get('vae_id')
-       vae_file = ui_inputs.get('vae_file')
        vae_name_override = None
-
        if vae_source and vae_source != "None":
            if vae_source == "File":
                vae_name_override = sanitize_filename(vae_id)
            elif vae_source == "Civitai" and vae_id and vae_id.strip():
-               local_path, status = get_vae_path(vae_source, vae_id, ui_inputs.get('civitai_api_key'), progress)
+               local_path, status = get_vae_path(vae_source, vae_id, os.environ.get("CIVITAI_API_KEY", ""), progress)
                if local_path: vae_name_override = os.path.basename(local_path)
                else: raise gr.Error(f"Failed to prepare VAE {vae_id}: {status}")
-
        if vae_name_override:
            ui_inputs['vae_name'] = vae_name_override
@@ -276,78 +435,84 @@
        active_conditioning = []
        if conditioning_data:
            num_units = len(conditioning_data) // 6
-           prompts = conditioning_data[0*num_units : 1*num_units]
-           widths = conditioning_data[1*num_units : 2*num_units]
-           heights = conditioning_data[2*num_units : 3*num_units]
-           xs = conditioning_data[3*num_units : 4*num_units]
-           ys = conditioning_data[4*num_units : 5*num_units]
-           strengths = conditioning_data[5*num_units : 6*num_units]
-
+           prompts, widths, heights, xs, ys, strengths = [conditioning_data[i*num_units : (i+1)*num_units] for i in range(6)]
            for i in range(num_units):
                if prompts[i] and prompts[i].strip():
                    active_conditioning.append({
-                       "prompt": prompts[i],
-                       "width": int(widths[i]),
-                       "height": int(heights[i]),
-                       "x": int(xs[i]),
-                       "y": int(ys[i]),
-                       "strength": float(strengths[i])
+                       "prompt": prompts[i], "width": int(widths[i]), "height": int(heights[i]),
+                       "x": int(xs[i]), "y": int(ys[i]), "strength": float(strengths[i])
                    })
 
-       reference_latent_data = ui_inputs.get('reference_latent_data', [])
-       active_reference_latents = []
-       if reference_latent_data:
-           for img_pil in reference_latent_data:
-               if img_pil is not None:
-                   temp_file_path = os.path.join(INPUT_DIR, f"temp_ref_{random.randint(1000, 9999)}.png")
-                   img_pil.save(temp_file_path, "PNG")
-                   active_reference_latents.append(os.path.basename(temp_file_path))
-                   temp_files_to_clean.append(temp_file_path)
-
        loras_string = f"LoRAs: [{', '.join(active_loras_for_meta)}]" if active_loras_for_meta else ""
 
        progress(0.8, desc="Assembling workflow...")
 
        if ui_inputs.get('seed') == -1:
            ui_inputs['seed'] = random.randint(0, 2**32 - 1)
+
+       model_info = ALL_MODEL_MAP[model_display_name]
+       path_or_components = model_info[1]
+       latent_type = model_info[3] if len(model_info) > 3 and model_info[3] else 'latent'
+       latent_generator_template = "EmptyLatentImage"
+       if latent_type == 'sd3_latent':
+           latent_generator_template = "EmptySD3LatentImage"
+       elif latent_type == 'chroma_radiance_latent':
+           latent_generator_template = "EmptyChromaRadianceLatentImage"
+       elif latent_type == 'hunyuan_latent':
+           latent_generator_template = "EmptyHunyuanImageLatent"
 
-       dynamic_values = {'task_type': ui_inputs['task_type']}
+       dynamic_values = {
+           'task_type': ui_inputs['task_type'],
+           'model_type': workflow_model_type,
+           'latent_type': latent_type,
+           'latent_generator_template': latent_generator_template
+       }
 
        recipe_path = os.path.join(os.path.dirname(__file__), "workflow_recipes", "sd_unified_recipe.yaml")
        assembler = WorkflowAssembler(recipe_path, dynamic_values=dynamic_values)
 
-       model_display_name = ui_inputs['model_display_name']
-       if model_display_name not in ALL_MODEL_MAP:
-           raise gr.Error(f"Model '{model_display_name}' is not configured in model_list.yaml.")
-
-       _, components, _, _ = ALL_MODEL_MAP[model_display_name]
-
        workflow_inputs = {
+           **ui_inputs,
            "positive_prompt": ui_inputs['positive_prompt'], "negative_prompt": ui_inputs['negative_prompt'],
            "seed": ui_inputs['seed'], "steps": ui_inputs['num_inference_steps'], "cfg": ui_inputs['guidance_scale'],
            "sampler_name": ui_inputs['sampler'], "scheduler": ui_inputs['scheduler'],
            "batch_size": ui_inputs['batch_size'],
+           "clip_skip": ui_inputs['clip_skip'],
            "denoise": ui_inputs['denoise'],
-           "input_image": ui_inputs.get('input_image'),
-           "inpaint_image": ui_inputs.get('inpaint_image'),
-           "inpaint_mask": ui_inputs.get('inpaint_mask'),
-           "left": ui_inputs.get('outpaint_left'), "top": ui_inputs.get('outpaint_top'),
-           "right": ui_inputs.get('outpaint_right'), "bottom": ui_inputs.get('outpaint_bottom'),
-           "hires_upscaler": ui_inputs.get('hires_upscaler'), "hires_scale_by": ui_inputs.get('hires_scale_by'),
-           "unet_name": components['unet'],
-           "clip_name": components['clip'],
-           "vae_name": ui_inputs.get('vae_name', components['vae']),
+           "vae_name": ui_inputs.get('vae_name'),
+           "guidance": ui_inputs.get('guidance', 3.5),
            "lora_chain": active_loras_for_gpu,
+           "controlnet_chain": active_controlnets,
+           "diffsynth_controlnet_chain": active_diffsynth_controlnets,
+           "ipadapter_chain": active_ipadapters,
+           "flux1_ipadapter_chain": active_flux1_ipadapters,
+           "sd3_ipadapter_chain": active_sd3_ipadapters,
+           "style_chain": active_styles,
            "conditioning_chain": active_conditioning,
            "reference_latent_chain": active_reference_latents,
+           "vae_chain": [ui_inputs.get('vae_name')] if ui_inputs.get('vae_name') else [],
        }
+
+       if isinstance(path_or_components, dict):
+           workflow_inputs.update({
+               'unet_name': path_or_components.get('unet'),
+               'vae_name': ui_inputs.get('vae_name') or path_or_components.get('vae'),
+               'clip_name': path_or_components.get('clip'),
+               'clip1_name': path_or_components.get('clip1'),
+               'clip2_name': path_or_components.get('clip2'),
+               'clip3_name': path_or_components.get('clip3'),
+               'clip4_name': path_or_components.get('clip4'),
+               'lora_name': path_or_components.get('lora'),
+           })
+       else:
+           workflow_inputs['model_name'] = path_or_components
 
        if task_type == 'txt2img':
            workflow_inputs['width'] = ui_inputs['width']
            workflow_inputs['height'] = ui_inputs['height']
 
        workflow = assembler.assemble(workflow_inputs)
-
+
        progress(1.0, desc="All models ready. Requesting GPU for generation...")
 
        try:
@@ -362,7 +527,7 @@
            assembler=assembler,
            progress=progress
        )
-
+
        import json
        import glob
        from PIL import PngImagePlugin
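The payload unpacking added above relies on two list-layout conventions: stride slicing (`data[i::5]`) for interleaved per-unit payloads such as `controlnet_data`, and block slicing (`data[i*n:(i+1)*n]`) for field-grouped payloads such as `flux1_ipadapter_data`. A self-contained sketch with made-up sample data (the actual layouts are fixed by the Gradio UI wiring elsewhere in the repo):

    # Stride slicing assumes an interleaved layout: [img0, a0, b0, s0, f0, img1, a1, ...]
    flat_interleaved = ["img0", "a0", "b0", 0.8, "cn0", "img1", "a1", "b1", 0.5, "cn1"]
    images, _, _, strengths, filepaths = [flat_interleaved[i::5] for i in range(5)]
    assert images == ["img0", "img1"] and strengths == [0.8, 0.5]

    # Block slicing assumes a field-grouped layout: [img0, img1, w0, w1, start0, start1, ...]
    flat_blocked = ["img0", "img1", 0.9, 0.4, 0.0, 0.0, 1.0, 1.0]
    n = len(flat_blocked) // 4
    imgs, weights, starts, ends = [flat_blocked[i * n:(i + 1) * n] for i in range(4)]
    assert weights == [0.9, 0.4]
    print("both unpacking conventions behave as expected")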
core/pipelines/workflow_recipes/_partials/{_base_sampler.yaml → _base_sampler_sd.yaml} RENAMED
@@ -1,11 +1,21 @@
 nodes:
+  pos_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Positive)"
+  neg_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Negative)"
   ksampler:
     class_type: KSampler
-
+    title: "KSampler"
+    params:
+      denoise: 1.0
   vae_decode:
     class_type: VAEDecode
+    title: "VAE Decode"
   save_image:
     class_type: SaveImage
+    title: "Save Image"
     params: {}
 
 connections:
@@ -15,9 +25,12 @@ connections:
     to: "save_image:images"
 
 ui_map:
+  positive_prompt: "pos_prompt:text"
+  negative_prompt: "neg_prompt:text"
   seed: "ksampler:seed"
   steps: "ksampler:steps"
   cfg: "ksampler:cfg"
   sampler_name: "ksampler:sampler_name"
   scheduler: "ksampler:scheduler"
-  denoise: "ksampler:denoise"
+  denoise: "ksampler:denoise"
+  filename_prefix: "save_image:filename_prefix"
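The renamed base-sampler partial now carries the prompt encoders and a richer `ui_map` of "node:param" targets. As one reading of that convention (the actual `WorkflowAssembler` implementation is not part of this commit, so this is an assumption about its behaviour, not repo code), applying a `ui_map` to an API-format workflow dict might look like:

    # Hypothetical sketch: route UI values into node inputs via "node:param" targets.
    def apply_ui_map(workflow: dict, ui_map: dict, ui_values: dict) -> dict:
        for ui_key, target in ui_map.items():
            if ui_key not in ui_values or ui_values[ui_key] is None:
                continue  # unset UI values leave the recipe's defaults in place
            node_id, param = target.split(":", 1)  # e.g. "ksampler:seed"
            workflow[node_id]["inputs"][param] = ui_values[ui_key]
        return workflow

    workflow = {"ksampler": {"class_type": "KSampler", "inputs": {}},
                "pos_prompt": {"class_type": "CLIPTextEncode", "inputs": {}}}
    ui_map = {"seed": "ksampler:seed", "positive_prompt": "pos_prompt:text"}
    print(apply_ui_map(workflow, ui_map, {"seed": 42, "positive_prompt": "a cat"}))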
core/pipelines/workflow_recipes/_partials/conditioning/anima.yaml ADDED
@@ -0,0 +1,54 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "stable_diffusion"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
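The `dynamic_lora_chains` block in these recipes describes where a stack of `LoraLoader` nodes taps the base loaders (`output_map`) and which consumer inputs get rewired to the last link (`end_input_map`). A hypothetical splice under that reading, using simplified "node:index" string references instead of ComfyUI's `["node", index]` pairs; this is an interpretation of the recipe format, not the repo's assembler code:

    # Splice n LoraLoader nodes between the loaders and their consumers (sketch only).
    def splice_lora_chain(workflow, loras, output_map, end_input_map):
        # output_map maps source refs to roles, e.g. {"unet_loader:0": "model"}
        sources = {role: src for src, role in output_map.items()}  # role -> "node:idx"
        for n, lora in enumerate(loras):
            node_id = f"lora_{n}"
            workflow[node_id] = {"class_type": "LoraLoader", "inputs": {
                "lora_name": lora["lora_name"],
                "strength_model": lora["strength_model"],
                "strength_clip": lora["strength_clip"],
                "model": sources["model"], "clip": sources["clip"],
            }}
            sources = {"model": f"{node_id}:0", "clip": f"{node_id}:1"}
        for role, targets in end_input_map.items():
            for target in targets:  # e.g. "ksampler:model" -> last link in the chain
                node_id, param = target.split(":")
                workflow[node_id]["inputs"][param] = sources[role]
        return workflow

    wf = {"ksampler": {"inputs": {}}, "pos_prompt": {"inputs": {}}, "neg_prompt": {"inputs": {}}}
    splice_lora_chain(
        wf, [{"lora_name": "x.safetensors", "strength_model": 0.8, "strength_clip": 0.8}],
        {"unet_loader:0": "model", "clip_loader:0": "clip"},
        {"model": ["ksampler:model"], "clip": ["pos_prompt:clip", "neg_prompt:clip"]})
    print(wf["ksampler"]["inputs"]["model"])  # "lora_0:0"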
core/pipelines/workflow_recipes/_partials/conditioning/chroma1-radiance.yaml ADDED
@@ -0,0 +1,59 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+    params:
+      vae_name: "pixel_space"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "chroma"
+      device: "default"
+  t5_tokenizer:
+    class_type: T5TokenizerOptions
+    title: "T5TokenizerOptions"
+    params:
+      min_padding: 0
+      min_length: 3
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "t5_tokenizer:clip"
+  - from: "t5_tokenizer:0"
+    to: "pos_prompt:clip"
+  - from: "t5_tokenizer:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "t5_tokenizer:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/chroma1.yaml ADDED
@@ -0,0 +1,61 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "chroma"
+      device: "default"
+  t5_tokenizer:
+    class_type: T5TokenizerOptions
+    title: "T5TokenizerOptions"
+    params:
+      min_padding: 1
+      min_length: 0
+  fresca:
+    class_type: FreSca
+    title: "FreSca"
+    params:
+      scale_low: 1.0
+      scale_high: 2.5
+      freq_cutoff: 30
+
+connections:
+  - from: "unet_loader:0"
+    to: "fresca:model"
+  - from: "fresca:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "t5_tokenizer:clip"
+  - from: "t5_tokenizer:0"
+    to: "pos_prompt:clip"
+  - from: "t5_tokenizer:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "t5_tokenizer:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/ernie-image.yaml ADDED
@@ -0,0 +1,54 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "flux2"
+      device: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
+  vae_name: "vae_loader:vae_name"
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nodes:
2
+ unet_loader:
3
+ class_type: UNETLoader
4
+ title: "Load FLUX UNET"
5
+ params:
6
+ weight_dtype: "default"
7
+ vae_loader:
8
+ class_type: VAELoader
9
+ title: "Load FLUX VAE"
10
+ clip_loader:
11
+ class_type: DualCLIPLoader
12
+ title: "Load FLUX Dual CLIP"
13
+ params:
14
+ type: "flux"
15
+ device: "default"
16
+ flux_guidance:
17
+ class_type: FluxGuidance
18
+ title: "FluxGuidance"
19
+
20
+ connections:
21
+ - from: "unet_loader:0"
22
+ to: "ksampler:model"
23
+ - from: "clip_loader:0"
24
+ to: "pos_prompt:clip"
25
+ - from: "clip_loader:0"
26
+ to: "neg_prompt:clip"
27
+ - from: "vae_loader:0"
28
+ to: "vae_decode:vae"
29
+ - from: "vae_loader:0"
30
+ to: "vae_encode:vae"
31
+ - from: "pos_prompt:0"
32
+ to: "flux_guidance:conditioning"
33
+ - from: "flux_guidance:0"
34
+ to: "ksampler:positive"
35
+ - from: "neg_prompt:0"
36
+ to: "ksampler:negative"
37
+
38
+ dynamic_controlnet_chains:
39
+ controlnet_chain:
40
+ template: "ControlNetApplyAdvanced"
41
+ ksampler_node: "ksampler"
42
+ vae_source: "vae_loader:0"
43
+
44
+ dynamic_flux1_ipadapter_chains:
45
+ flux1_ipadapter_chain:
46
+ ksampler_node: "ksampler"
47
+
48
+ dynamic_style_chains:
49
+ style_chain:
50
+ flux_guidance_node: "flux_guidance"
51
+ ksampler_node: "ksampler"
52
+
53
+ dynamic_conditioning_chains:
54
+ conditioning_chain:
55
+ flux_guidance_node: "flux_guidance"
56
+ ksampler_node: "ksampler"
57
+ clip_source: "clip_loader:0"
58
+
59
+ ui_map:
60
+ unet_name: "unet_loader:unet_name"
61
+ vae_name: "vae_loader:vae_name"
62
+ clip1_name: "clip_loader:clip_name1"
63
+ clip2_name: "clip_loader:clip_name2"
64
+ guidance: "flux_guidance:guidance"
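Note the asymmetric routing in this recipe: only the positive conditioning passes through `FluxGuidance` before reaching the sampler, which matches the usual ComfyUI wiring for FLUX.1 models. A minimal API-format fragment in Python illustrating that shape (node ids, prompt text, and the trimmed KSampler inputs are illustrative, not taken from the repo):

    # positive text -> FluxGuidance -> sampler; negative text -> sampler directly
    fragment = {
        "pos_prompt": {"class_type": "CLIPTextEncode",
                       "inputs": {"text": "a lighthouse at dusk", "clip": ["clip_loader", 0]}},
        "neg_prompt": {"class_type": "CLIPTextEncode",
                       "inputs": {"text": "", "clip": ["clip_loader", 0]}},
        "flux_guidance": {"class_type": "FluxGuidance",
                          "inputs": {"guidance": 3.5, "conditioning": ["pos_prompt", 0]}},
        "ksampler": {"class_type": "KSampler",
                     "inputs": {"positive": ["flux_guidance", 0], "negative": ["neg_prompt", 0]}},
    }
    print(fragment["ksampler"]["inputs"]["positive"])  # routed through FluxGuidance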
core/pipelines/workflow_recipes/_partials/conditioning/flux2-kv.yaml ADDED
@@ -0,0 +1,104 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "flux2"
+      device: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+
+  flux_kv_cache:
+    class_type: FluxKVCache
+    title: "Flux KV Cache"
+
+  pos_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Positive)"
+  neg_prompt:
+    class_type: CLIPTextEncode
+    title: "CLIP Text Encode (Negative)"
+
+  ksampler:
+    class_type: KSampler
+    title: "KSampler"
+    params:
+      denoise: 1.0
+
+  vae_decode:
+    class_type: VAEDecode
+    title: "VAE Decode"
+
+  save_image:
+    class_type: SaveImage
+    title: "Save Image"
+
+connections:
+  - from: "unet_loader:0"
+    to: "flux_kv_cache:model"
+  - from: "flux_kv_cache:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "latent_source:0"
+    to: "ksampler:latent_image"
+
+  - from: "ksampler:0"
+    to: "vae_decode:samples"
+  - from: "vae_decode:0"
+    to: "save_image:images"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["flux_kv_cache:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_reference_latent_chains:
+  reference_latent_chain:
+    ksampler_node: "ksampler"
+    vae_node: "vae_loader"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  clip_name: "clip_loader:clip_name"
+  vae_name: "vae_loader:vae_name"
+
+  positive_prompt: "pos_prompt:text"
+  negative_prompt: "neg_prompt:text"
+
+  seed: "ksampler:seed"
+  steps: "ksampler:steps"
+  cfg: "ksampler:cfg"
+  sampler_name: "ksampler:sampler_name"
+  scheduler: "ksampler:scheduler"
+  denoise: "ksampler:denoise"
+
+  filename_prefix: "save_image:filename_prefix"
@@ -20,6 +20,20 @@ nodes:
20
  neg_prompt:
21
  class_type: CLIPTextEncode
22
  title: "CLIP Text Encode (Negative)"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  connections:
25
  - from: "unet_loader:0"
@@ -37,6 +51,14 @@ connections:
37
  to: "ksampler:positive"
38
  - from: "neg_prompt:0"
39
  to: "ksampler:negative"
 
 
 
 
 
 
 
 
40
 
41
  dynamic_lora_chains:
42
  lora_chain:
@@ -51,11 +73,6 @@ dynamic_lora_chains:
51
  "model": ["ksampler:model"]
52
  "clip": ["pos_prompt:clip", "neg_prompt:clip"]
53
 
54
- dynamic_conditioning_chains:
55
- conditioning_chain:
56
- ksampler_node: "ksampler"
57
- clip_source: "clip_loader:0"
58
-
59
  dynamic_reference_latent_chains:
60
  reference_latent_chain:
61
  ksampler_node: "ksampler"
@@ -65,5 +82,15 @@ ui_map:
65
  unet_name: "unet_loader:unet_name"
66
  clip_name: "clip_loader:clip_name"
67
  vae_name: "vae_loader:vae_name"
 
68
  positive_prompt: "pos_prompt:text"
69
- negative_prompt: "neg_prompt:text"
 
 
 
 
 
 
 
 
 
 
20
  neg_prompt:
21
  class_type: CLIPTextEncode
22
  title: "CLIP Text Encode (Negative)"
23
+
24
+ ksampler:
25
+ class_type: KSampler
26
+ title: "KSampler"
27
+ params:
28
+ denoise: 1.0
29
+
30
+ vae_decode:
31
+ class_type: VAEDecode
32
+ title: "VAE Decode"
33
+
34
+ save_image:
35
+ class_type: SaveImage
36
+ title: "Save Image"
37
 
38
  connections:
39
  - from: "unet_loader:0"
 
51
  to: "ksampler:positive"
52
  - from: "neg_prompt:0"
53
  to: "ksampler:negative"
54
+
55
+ - from: "latent_source:0"
56
+ to: "ksampler:latent_image"
57
+
58
+ - from: "ksampler:0"
59
+ to: "vae_decode:samples"
60
+ - from: "vae_decode:0"
61
+ to: "save_image:images"
62
 
63
  dynamic_lora_chains:
64
  lora_chain:
 
73
  "model": ["ksampler:model"]
74
  "clip": ["pos_prompt:clip", "neg_prompt:clip"]
75
 
 
 
 
 
 
76
  dynamic_reference_latent_chains:
77
  reference_latent_chain:
78
  ksampler_node: "ksampler"
 
82
  unet_name: "unet_loader:unet_name"
83
  clip_name: "clip_loader:clip_name"
84
  vae_name: "vae_loader:vae_name"
85
+
86
  positive_prompt: "pos_prompt:text"
87
+ negative_prompt: "neg_prompt:text"
88
+
89
+ seed: "ksampler:seed"
90
+ steps: "ksampler:steps"
91
+ cfg: "ksampler:cfg"
92
+ sampler_name: "ksampler:sampler_name"
93
+ scheduler: "ksampler:scheduler"
94
+ denoise: "ksampler:denoise"
95
+
96
+ filename_prefix: "save_image:filename_prefix"
core/pipelines/workflow_recipes/_partials/conditioning/hidream.yaml ADDED
@@ -0,0 +1,53 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load HiDream UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load HiDream VAE"
+  clip_loader:
+    class_type: QuadrupleCLIPLoader
+    title: "Load HiDream Quadruple CLIP"
+
+  model_sampler:
+    class_type: ModelSamplingSD3
+    title: "ModelSamplingSD3"
+    params:
+      shift: 6.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
+  clip3_name: "clip_loader:clip_name3"
+  clip4_name: "clip_loader:clip_name4"
core/pipelines/workflow_recipes/_partials/conditioning/hunyuanimage.yaml ADDED
@@ -0,0 +1,42 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Hunyuan UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load Hunyuan VAE"
+  clip_loader:
+    class_type: DualCLIPLoader
+    title: "Load Hunyuan Dual CLIP"
+    params:
+      type: "hunyuan_image"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
core/pipelines/workflow_recipes/_partials/conditioning/longcat-image.yaml ADDED
@@ -0,0 +1,83 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "longcat_image"
+      device: "default"
+
+  cfg_norm:
+    class_type: CFGNorm
+    title: "CFGNorm"
+    params:
+      strength: 1.0
+
+  flux_guidance_pos:
+    class_type: FluxGuidance
+    title: "FluxGuidance (Positive)"
+    params:
+      guidance: 4.0
+
+  flux_guidance_neg:
+    class_type: FluxGuidance
+    title: "FluxGuidance (Negative)"
+    params:
+      guidance: 4.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "cfg_norm:model"
+  - from: "cfg_norm:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "flux_guidance_pos:conditioning"
+  - from: "neg_prompt:0"
+    to: "flux_guidance_neg:conditioning"
+
+  - from: "flux_guidance_pos:0"
+    to: "ksampler:positive"
+  - from: "flux_guidance_neg:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["cfg_norm:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    flux_guidance_node: "flux_guidance_pos"
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/lumina.yaml ADDED
@@ -0,0 +1,57 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 4.0
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "ckpt_loader:1"
+    to: "pos_prompt:clip"
+  - from: "ckpt_loader:1"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_vae_chains:
+  vae_chain:
+    targets:
+      - "vae_decode:vae"
+      - "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "ckpt_loader"
+    output_map:
+      "0": "model"
+      "1": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
core/pipelines/workflow_recipes/_partials/conditioning/newbie-image.yaml ADDED
@@ -0,0 +1,65 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: DualCLIPLoader
+    title: "Load Dual CLIP"
+    params:
+      type: "newbie"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 6
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_newbie_lora_chains:
+  lora_chain:
+    template: "NewBieLoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip1_name: "clip_loader:clip_name1"
+  clip2_name: "clip_loader:clip_name2"
core/pipelines/workflow_recipes/_partials/conditioning/omnigen2.yaml ADDED
@@ -0,0 +1,59 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "omnigen2"
+      device: "default"
+
+connections:
+  - from: "unet_loader:0"
+    to: "ksampler:model"
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+dynamic_reference_latent_chains:
+  reference_latent_chain:
+    ksampler_node: "ksampler"
+    vae_node: "vae_loader"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
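
OmniGen2 is the only recipe in this set with a `dynamic_reference_latent_chains` block: each reference image is VAE-encoded and its latent attached to the positive conditioning so the model can attend to it during sampling. A rough sketch of what such an injector might add per image; the node ids and graph dict shape are assumptions, while ComfyUI's `ReferenceLatent` node does take a conditioning plus a latent:

```python
# Rough sketch: per reference image, add LoadImage -> VAEEncode ->
# ReferenceLatent and thread the ksampler's positive conditioning
# through each hop. Node ids and dict shapes are illustrative.

def inject_reference_latents(graph, image_names, ksampler="ksampler",
                             vae_node="vae_loader"):
    cond = graph[ksampler]["inputs"]["positive"]  # e.g. ("pos_prompt", 0)
    for i, image_name in enumerate(image_names):
        load_id, enc_id, ref_id = f"ref_load_{i}", f"ref_enc_{i}", f"ref_{i}"
        graph[load_id] = {"class_type": "LoadImage",
                          "inputs": {"image": image_name}}
        graph[enc_id] = {"class_type": "VAEEncode",
                         "inputs": {"pixels": (load_id, 0),
                                    "vae": (vae_node, 0)}}
        graph[ref_id] = {"class_type": "ReferenceLatent",
                         "inputs": {"conditioning": cond,
                                    "latent": (enc_id, 0)}}
        cond = (ref_id, 0)  # each hop consumes the previous conditioning
    graph[ksampler]["inputs"]["positive"] = cond
```
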
core/pipelines/workflow_recipes/_partials/conditioning/ovis-image.yaml ADDED
@@ -0,0 +1,50 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "ovis"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
core/pipelines/workflow_recipes/_partials/conditioning/qwen-image.yaml ADDED
@@ -0,0 +1,80 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Qwen UNET"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load Qwen VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load Qwen CLIP"
+    params:
+      type: "qwen_image"
+      device: "default"
+
+  lora_loader:
+    class_type: LoraLoaderModelOnly
+    title: "Load Qwen Lightning LoRA"
+    params:
+      strength_model: 1.0
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    title: "ModelSamplingAuraFlow"
+    params:
+      shift: 3.1
+
+connections:
+  - from: "unet_loader:0"
+    to: "lora_loader:model"
+  - from: "lora_loader:0"
+    to: "model_sampler:model"
+
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "lora_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "vae_loader:0"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
+  lora_name: "lora_loader:lora_name"
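
The `dynamic_controlnet_chains` entries in these recipes all template `ControlNetApplyAdvanced`, which rewrites the positive/negative conditioning pair rather than a single edge, so the injector has to intercept both inputs feeding the KSampler; `vae_source` is threaded through because some ControlNet variants need the VAE. A hedged sketch of one link, with illustrative node ids and graph shapes:

```python
# Hedged sketch of one ControlNetApplyAdvanced link: the node consumes
# the conditioning pair plus a control image and emits a rewritten pair,
# so the injector re-points the ksampler at its two outputs.

def inject_controlnet(graph, controlnet_name, control_image, strength=1.0,
                      ksampler="ksampler", vae_source=("vae_loader", 0)):
    pos = graph[ksampler]["inputs"]["positive"]
    neg = graph[ksampler]["inputs"]["negative"]
    graph["cn_loader"] = {"class_type": "ControlNetLoader",
                          "inputs": {"control_net_name": controlnet_name}}
    graph["cn_image"] = {"class_type": "LoadImage",
                         "inputs": {"image": control_image}}
    graph["cn_apply"] = {
        "class_type": "ControlNetApplyAdvanced",
        "inputs": {"positive": pos, "negative": neg,
                   "control_net": ("cn_loader", 0),
                   "image": ("cn_image", 0),
                   "strength": strength,
                   "start_percent": 0.0, "end_percent": 1.0,
                   "vae": vae_source},
    }
    # ControlNetApplyAdvanced outputs: 0 = positive, 1 = negative.
    graph[ksampler]["inputs"]["positive"] = ("cn_apply", 0)
    graph[ksampler]["inputs"]["negative"] = ("cn_apply", 1)
```

Stacking several ControlNets just repeats this splice, each link consuming the previous link's rewritten pair.
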
core/pipelines/workflow_recipes/_partials/conditioning/sd15.yaml ADDED
@@ -0,0 +1,69 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+  clip_set_last_layer:
+    class_type: CLIPSetLastLayer
+    title: "CLIP Set Last Layer"
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "ksampler:model"
+  - from: "ckpt_loader:1"
+    to: "clip_set_last_layer:clip"
+  - from: "clip_set_last_layer:0"
+    to: "pos_prompt:clip"
+  - from: "clip_set_last_layer:0"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_vae_chains:
+  vae_chain:
+    targets:
+      - "vae_decode:vae"
+      - "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "clip_set_last_layer"
+    output_map:
+      "ckpt_loader:0": "model"
+      "0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "ckpt_loader:2"
+
+dynamic_ipadapter_chains:
+  ipadapter_chain:
+    end: "ksampler"
+    final_preset: "{{ ipadapter_final_preset }}"
+    final_weight: "{{ ipadapter_final_weight }}"
+    final_embeds_scaling: "{{ ipadapter_embeds_scaling }}"
+    final_loader_type: "{{ ipadapter_final_loader_type }}"
+    final_lora_strength: "{{ ipadapter_final_lora_strength }}"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "clip_set_last_layer:0"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
+  clip_skip: "clip_set_last_layer:stop_at_clip_layer"
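
Unlike the literal `params` used elsewhere, the IPAdapter chain's `final_*` values stay as `{{ ... }}` placeholders, so per-request UI settings are substituted when the recipe is rendered instead of being hard-coded. A minimal sketch of such a substitution pass, assuming plain string templating rather than a full Jinja environment:

```python
import re

# Minimal sketch: resolve "{{ placeholder }}" tokens in a recipe string
# against a per-request context dict. Plain string templating assumed.
_PLACEHOLDER = re.compile(r"\{\{\s*(\w+)\s*\}\}")

def render_recipe(text: str, context: dict) -> str:
    return _PLACEHOLDER.sub(lambda m: str(context[m.group(1)]), text)

# render_recipe('final_weight: "{{ ipadapter_final_weight }}"',
#               {"ipadapter_final_weight": 0.8})
# -> 'final_weight: "0.8"'
```
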
core/pipelines/workflow_recipes/_partials/conditioning/sd35.yaml ADDED
@@ -0,0 +1,58 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "ksampler:model"
+  - from: "ckpt_loader:1"
+    to: "pos_prompt:clip"
+  - from: "ckpt_loader:1"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_vae_chains:
+  vae_chain:
+    targets:
+      - "vae_decode:vae"
+      - "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "ckpt_loader"
+    output_map:
+      "0": "model"
+      "1": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "ckpt_loader:2"
+
+dynamic_sd3_ipadapter_chains:
+  sd3_ipadapter_chain:
+    ksampler_node: "ksampler"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
core/pipelines/workflow_recipes/_partials/conditioning/sdxl.yaml ADDED
@@ -0,0 +1,63 @@
+nodes:
+  ckpt_loader:
+    class_type: CheckpointLoaderSimple
+    title: "Load Checkpoint"
+
+connections:
+  - from: "ckpt_loader:0"
+    to: "ksampler:model"
+  - from: "ckpt_loader:1"
+    to: "pos_prompt:clip"
+  - from: "ckpt_loader:1"
+    to: "neg_prompt:clip"
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+  - from: "ckpt_loader:2"
+    to: "vae_decode:vae"
+  - from: "ckpt_loader:2"
+    to: "vae_encode:vae"
+
+dynamic_vae_chains:
+  vae_chain:
+    targets:
+      - "vae_decode:vae"
+      - "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    start: "ckpt_loader"
+    output_map:
+      "0": "model"
+      "1": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["ksampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_controlnet_chains:
+  controlnet_chain:
+    template: "ControlNetApplyAdvanced"
+    ksampler_node: "ksampler"
+    vae_source: "ckpt_loader:2"
+
+dynamic_ipadapter_chains:
+  ipadapter_chain:
+    end: "ksampler"
+    final_preset: "{{ ipadapter_final_preset }}"
+    final_weight: "{{ ipadapter_final_weight }}"
+    final_embeds_scaling: "{{ ipadapter_embeds_scaling }}"
+    final_loader_type: "{{ ipadapter_final_loader_type }}"
+    final_lora_strength: "{{ ipadapter_final_lora_strength }}"
+
+dynamic_conditioning_chains:
+  conditioning_chain:
+    ksampler_node: "ksampler"
+    clip_source: "ckpt_loader:1"
+
+ui_map:
+  model_name: "ckpt_loader:ckpt_name"
core/pipelines/workflow_recipes/_partials/conditioning/z-image.yaml ADDED
@@ -0,0 +1,65 @@
+nodes:
+  unet_loader:
+    class_type: UNETLoader
+    title: "Load Diffusion Model"
+    params:
+      weight_dtype: "default"
+  vae_loader:
+    class_type: VAELoader
+    title: "Load VAE"
+  clip_loader:
+    class_type: CLIPLoader
+    title: "Load CLIP"
+    params:
+      type: "lumina2"
+      device: "default"
+  model_sampler:
+    class_type: ModelSamplingAuraFlow
+    params:
+      shift: 3.0
+
+connections:
+  - from: "unet_loader:0"
+    to: "model_sampler:model"
+  - from: "model_sampler:0"
+    to: "ksampler:model"
+
+  - from: "clip_loader:0"
+    to: "pos_prompt:clip"
+  - from: "clip_loader:0"
+    to: "neg_prompt:clip"
+
+  - from: "pos_prompt:0"
+    to: "ksampler:positive"
+  - from: "neg_prompt:0"
+    to: "ksampler:negative"
+
+  - from: "vae_loader:0"
+    to: "vae_decode:vae"
+  - from: "vae_loader:0"
+    to: "vae_encode:vae"
+
+dynamic_lora_chains:
+  lora_chain:
+    template: "LoraLoader"
+    output_map:
+      "unet_loader:0": "model"
+      "clip_loader:0": "clip"
+    input_map:
+      "model": "model"
+      "clip": "clip"
+    end_input_map:
+      "model": ["model_sampler:model"]
+      "clip": ["pos_prompt:clip", "neg_prompt:clip"]
+
+dynamic_diffsynth_controlnet_chains:
+  diffsynth_controlnet_chain:
+    template: "QwenImageDiffsynthControlnet"
+    model_sampler_node: "model_sampler"
+    ksampler_node: "ksampler"
+    vae_source: "vae_loader:0"
+
+ui_map:
+  unet_name: "unet_loader:unet_name"
+  vae_name: "vae_loader:vae_name"
+  clip_name: "clip_loader:clip_name"
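
Z-Image borrows the Qwen-Image DiffSynth path: a `QwenImageDiffsynthControlnet`-style node patches the MODEL stream itself rather than the conditioning pair, which is why this chain declares a `model_sampler_node` splice point in addition to `ksampler_node` and `vae_source`. A sketch under those assumptions; the input names below are illustrative, not confirmed against ComfyUI's node signature:

```python
# Sketch of a DiffSynth-style ControlNet injection: it splices into the
# MODEL stream between the LoRA chain and ModelSamplingAuraFlow instead
# of rewriting conditioning. Class/input names are assumptions.

def inject_diffsynth_controlnet(graph, patch_name, control_image,
                                strength=1.0,
                                model_sampler="model_sampler",
                                vae_source=("vae_loader", 0)):
    upstream_model = graph[model_sampler]["inputs"]["model"]
    graph["ds_patch"] = {"class_type": "ModelPatchLoader",
                         "inputs": {"name": patch_name}}
    graph["ds_image"] = {"class_type": "LoadImage",
                         "inputs": {"image": control_image}}
    graph["ds_apply"] = {
        "class_type": "QwenImageDiffsynthControlnet",
        "inputs": {"model": upstream_model,
                   "model_patch": ("ds_patch", 0),
                   "vae": vae_source,
                   "image": ("ds_image", 0),
                   "strength": strength},
    }
    # The patched model now feeds ModelSamplingAuraFlow -> ksampler.
    graph[model_sampler]["inputs"]["model"] = ("ds_apply", 0)
```
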
core/pipelines/workflow_recipes/_partials/input/hires_fix.yaml CHANGED
@@ -1,15 +1,16 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Input Image"
   vae_encode:
     class_type: VAEEncode
-
+    title: "VAE Encode (Hires Pre-step)"
   latent_upscaler:
     class_type: LatentUpscaleBy
-
+    title: "Upscale Latent By"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch for Hires"
 
 connections:
   - from: "input_image_loader:0"
core/pipelines/workflow_recipes/_partials/input/img2img.yaml CHANGED
@@ -1,12 +1,13 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Input Image"
   vae_encode:
     class_type: VAEEncode
-
+    title: "VAE Encode (Img2Img)"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch"
 
 connections:
   - from: "input_image_loader:0"
core/pipelines/workflow_recipes/_partials/input/inpaint.yaml CHANGED
@@ -2,24 +2,22 @@ nodes:
   inpaint_loader:
     class_type: LoadImage
     title: "Load Inpaint Image+Mask"
-
   vae_encode:
     class_type: VAEEncodeForInpaint
-    params:
-      grow_mask_by: 6
-
+    title: "VAE Encode (for Inpainting)"
   latent_source:
     class_type: RepeatLatentBatch
-
+    title: "Repeat Latent Batch"
+
 connections:
   - from: "inpaint_loader:0"
     to: "vae_encode:pixels"
   - from: "inpaint_loader:1"
     to: "vae_encode:mask"
-
   - from: "vae_encode:0"
     to: "latent_source:samples"
 
 ui_map:
-  inpaint_image: "inpaint_loader:image"
-  batch_size: "latent_source:amount"
+  input_image: "inpaint_loader:image"
+  batch_size: "latent_source:amount"
+  grow_mask_by: "vae_encode:grow_mask_by"
core/pipelines/workflow_recipes/_partials/input/outpaint.yaml CHANGED
@@ -1,38 +1,41 @@
 nodes:
   input_image_loader:
     class_type: LoadImage
-
+    title: "Load Image for Outpaint"
+  scale_image:
+    class_type: ImageScaleToTotalPixels
+    title: "Scale Image to Total Pixels"
+    params:
+      upscale_method: "nearest-exact"
   pad_image:
     class_type: ImagePadForOutpaint
-    params:
-      feathering: 10
-
+    title: "Pad Image for Outpainting"
   vae_encode:
     class_type: VAEEncodeForInpaint
-    params:
-      grow_mask_by: 6
-
+    title: "VAE Encode (for Inpainting)"
   latent_source:
     class_type: RepeatLatentBatch
+    title: "Repeat Latent Batch"
 
 connections:
   - from: "input_image_loader:0"
+    to: "scale_image:image"
+  - from: "scale_image:0"
     to: "pad_image:image"
-
   - from: "pad_image:0"
     to: "vae_encode:pixels"
   - from: "pad_image:1"
     to: "vae_encode:mask"
-
   - from: "vae_encode:0"
     to: "latent_source:samples"
 
 ui_map:
   input_image: "input_image_loader:image"
-
+  megapixels: "scale_image:megapixels"
   left: "pad_image:left"
   top: "pad_image:top"
   right: "pad_image:right"
   bottom: "pad_image:bottom"
-
+  feathering: "pad_image:feathering"
+  grow_mask_by: "vae_encode:grow_mask_by"
   batch_size: "latent_source:amount"
core/pipelines/workflow_recipes/_partials/input/txt2img.yaml CHANGED
@@ -1,8 +1,2 @@
-nodes:
-  latent_source:
-    class_type: EmptyFlux2LatentImage
-
-ui_map:
-  width: "latent_source:width"
-  height: "latent_source:height"
-  batch_size: "latent_source:batch_size"
+imports:
+  - "txt2img_{{ latent_type }}.yaml"
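
The txt2img input partial is now just a templated import: the builder substitutes the architecture's `latent_type` (carried on the model tuples built in `core/settings.py` below) into the file name and pulls in one of the `txt2img_*_latent.yaml` partials that follow. A sketch of how that resolution might work; only the file layout comes from this commit, the loader API is assumed:

```python
import os
import re
import yaml

# Sketch: resolve templated names like "txt2img_{{ latent_type }}.yaml"
# and flatten nested "imports:" lists into an ordered list of partial docs.

def resolve_name(name: str, context: dict) -> str:
    return re.sub(r"\{\{\s*(\w+)\s*\}\}",
                  lambda m: str(context[m.group(1)]), name)

def load_partials(base_dir: str, name: str, context: dict) -> list:
    path = os.path.join(base_dir, resolve_name(name, context))
    with open(path, encoding="utf-8") as f:
        doc = yaml.safe_load(f) or {}
    docs = []
    for child in doc.pop("imports", []):  # txt2img.yaml is imports-only
        docs += load_partials(base_dir, child, context)
    docs.append(doc)
    return docs

# load_partials(partials_dir, "txt2img.yaml", {"latent_type": "sd3_latent"})
# resolves to txt2img_sd3_latent.yaml; the default "latent" would pick
# txt2img_latent.yaml.
```
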
core/pipelines/workflow_recipes/_partials/input/txt2img_chroma_radiance_latent.yaml ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyChromaRadianceLatentImage"
+    title: "EmptyChromaRadianceLatentImage"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_flux2_latent.yaml ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyFlux2LatentImage"
+    title: "Empty Flux 2 Latent"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_hunyuan_latent.yaml ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptyHunyuanImageLatent"
+    title: "EmptyHunyuanImageLatent"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_latent.yaml ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "{{ latent_generator_template }}"
+    title: "Empty Latent Image"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/_partials/input/txt2img_sd3_latent.yaml ADDED
@@ -0,0 +1,11 @@
+nodes:
+  latent_source:
+    class_type: "EmptySD3LatentImage"
+    title: "EmptySD3LatentImage"
+
+connections: []
+
+ui_map:
+  width: "latent_source:width"
+  height: "latent_source:height"
+  batch_size: "latent_source:batch_size"
core/pipelines/workflow_recipes/sd_unified_recipe.yaml CHANGED
@@ -1,7 +1,7 @@
 imports:
-  - "_partials/_base_sampler.yaml"
+  - "_partials/_base_sampler_sd.yaml"
   - "_partials/input/{{ task_type }}.yaml"
-  - "_partials/conditioning/flux2.yaml"
+  - "_partials/conditioning/{{ model_type }}.yaml"
 
 connections:
   - from: "latent_source:0"
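
With both the sampler and conditioning imports parameterized, the unified recipe is pure composition: each partial contributes `nodes`, `connections`, and `ui_map` sections that have to be folded into one graph. A hedged sketch of the merge shape; the real builder may resolve key collisions and ordering differently:

```python
# Hedged sketch of folding recipe partials into one document: dict
# sections merge by key, list sections concatenate, scalars overwrite.

def merge_partials(*docs: dict) -> dict:
    merged = {}
    for doc in docs:
        for section, value in doc.items():
            if isinstance(value, dict):
                merged.setdefault(section, {}).update(value)
            elif isinstance(value, list):
                merged.setdefault(section, []).extend(value)
            else:
                merged[section] = value
    return merged

# merge_partials(base_sampler, input_partial, conditioning_partial)
# yields one doc whose "connections" list spans all three sources.
```
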
core/settings.py CHANGED
@@ -10,16 +10,37 @@ MODEL_PATCHES_DIR = "models/model_patches"
 DIFFUSION_MODELS_DIR = "models/diffusion_models"
 VAE_DIR = "models/vae"
 TEXT_ENCODERS_DIR = "models/text_encoders"
+STYLE_MODELS_DIR = "models/style_models"
+CLIP_VISION_DIR = "models/clip_vision"
+IPADAPTER_DIR = "models/ipadapter"
+IPADAPTER_FLUX_DIR = "models/ipadapter-flux"
 INPUT_DIR = "input"
 OUTPUT_DIR = "output"
 
+CATEGORY_TO_DIR_MAP = {
+    "diffusion_models": DIFFUSION_MODELS_DIR,
+    "text_encoders": TEXT_ENCODERS_DIR,
+    "vae": VAE_DIR,
+    "checkpoints": CHECKPOINT_DIR,
+    "loras": LORA_DIR,
+    "controlnet": CONTROLNET_DIR,
+    "model_patches": MODEL_PATCHES_DIR,
+    "embeddings": EMBEDDING_DIR,
+    "style_models": STYLE_MODELS_DIR,
+    "clip_vision": CLIP_VISION_DIR,
+    "ipadapter": IPADAPTER_DIR,
+    "ipadapter-flux": IPADAPTER_FLUX_DIR
+}
+
 _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 _MODEL_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_list.yaml')
 _FILE_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'file_list.yaml')
+_IPADAPTER_LIST_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'ipadapter.yaml')
 _CONSTANTS_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'constants.yaml')
+_MODEL_ARCHITECTURES_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_architectures.yaml')
+_IMAGE_GEN_FEATURES_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'image_gen_features.yaml')
 _MODEL_DEFAULTS_PATH = os.path.join(_PROJECT_ROOT, 'yaml', 'model_defaults.yaml')
 
-
 def load_constants_from_yaml(filepath=_CONSTANTS_PATH):
     if not os.path.exists(filepath):
         print(f"Warning: Constants file not found at {filepath}. Using fallback values.")
@@ -27,6 +48,27 @@ def load_constants_from_yaml(filepath=_CONSTANTS_PATH):
     with open(filepath, 'r', encoding='utf-8') as f:
         return yaml.safe_load(f)
 
+def load_architectures_config(filepath=_MODEL_ARCHITECTURES_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Architectures file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
+def load_features_config(filepath=_IMAGE_GEN_FEATURES_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Features file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
+def load_model_defaults(filepath=_MODEL_DEFAULTS_PATH):
+    if not os.path.exists(filepath):
+        print(f"Warning: Model defaults file not found at {filepath}.")
+        return {}
+    with open(filepath, 'r', encoding='utf-8') as f:
+        return yaml.safe_load(f)
+
 def load_file_download_map(filepath=_FILE_LIST_PATH):
     if not os.path.exists(filepath):
         raise FileNotFoundError(f"The file list (for downloads) was not found at: {filepath}")
@@ -59,50 +101,86 @@ def load_models_from_yaml(model_list_filepath=_MODEL_LIST_PATH, download_map=Non
     }
     category_map_names = {
         "Checkpoint": "MODEL_MAP_CHECKPOINT",
+        "Checkpoints": "MODEL_MAP_CHECKPOINT"
     }
 
-    for category, models in model_data.items():
+    for category, architectures in model_data.items():
         if category in category_map_names:
             map_name = category_map_names[category]
-            if not isinstance(models, list): continue
-            for model in models:
-                display_name = model['display_name']
-                components = model.get('components', {})
-
-                model_tuple = (
-                    None,
-                    components,
-                    "SDXL",
-                    None
-                )
-                model_maps[map_name][display_name] = model_tuple
-                model_maps["ALL_MODEL_MAP"][display_name] = model_tuple
+            if not isinstance(architectures, dict): continue
+
+            for arch, arch_data in architectures.items():
+                if not isinstance(arch_data, dict): continue
+
+                latent_type = arch_data.get('latent_type', 'latent')
+                models = arch_data.get('models', [])
+                if not isinstance(models, list): continue
+
+                for model in models:
+                    display_name = model['display_name']
+                    path_or_components = model.get('path') or model.get('components')
+                    mod_category = model.get('category', None)
+
+                    repo_id = ''
+                    if isinstance(path_or_components, str):
+                        download_info = download_map.get(path_or_components, {})
+                        repo_id = download_info.get('repo_id', '')
+
+                    model_tuple = (
+                        repo_id,
+                        path_or_components,
+                        arch,
+                        latent_type,
+                        mod_category
+                    )
+                    model_maps[map_name][display_name] = model_tuple
+                    model_maps["ALL_MODEL_MAP"][display_name] = model_tuple
 
     return model_maps
 
-def load_model_defaults(filepath=_MODEL_DEFAULTS_PATH):
-    if not os.path.exists(filepath):
-        print(f"Warning: Model defaults file not found at {filepath}. Using empty defaults.")
-        return {}
-    with open(filepath, 'r', encoding='utf-8') as f:
-        return yaml.safe_load(f)
-
 try:
     ALL_FILE_DOWNLOAD_MAP = load_file_download_map()
     loaded_maps = load_models_from_yaml(download_map=ALL_FILE_DOWNLOAD_MAP)
     MODEL_MAP_CHECKPOINT = loaded_maps["MODEL_MAP_CHECKPOINT"]
     ALL_MODEL_MAP = loaded_maps["ALL_MODEL_MAP"]
 
+    category_to_model_type = {
+        "diffusion_models": "UNET",
+        "text_encoders": "TEXT_ENCODER",
+        "vae": "VAE",
+        "checkpoints": "SDXL",
+        "loras": "LORA",
+        "controlnet": "CONTROLNET",
+        "model_patches": "MODEL_PATCH",
+        "style_models": "STYLE",
+        "clip_vision": "CLIP_VISION",
+        "ipadapter": "IPADAPTER",
+        "ipadapter-flux": "IPADAPTER_FLUX"
+    }
+    for filename, file_info in ALL_FILE_DOWNLOAD_MAP.items():
+        if filename not in ALL_MODEL_MAP:
+            category = file_info.get('category')
+            model_type = category_to_model_type.get(category, 'UNKNOWN')
+            repo_id = file_info.get('repo_id', '')
+            ALL_MODEL_MAP[filename] = (repo_id, filename, model_type, None, None)
+
     MODEL_TYPE_MAP = {k: v[2] for k, v in ALL_MODEL_MAP.items()}
-
-    ALL_MODEL_DEFAULTS = load_model_defaults()
+
+    ARCH_CATEGORIES_MAP = {}
+    for display_name, info in MODEL_MAP_CHECKPOINT.items():
+        arch = info[2]
+        cat = info[4] if len(info) > 4 else None
+        if arch not in ARCH_CATEGORIES_MAP:
+            ARCH_CATEGORIES_MAP[arch] = []
+        if cat and cat not in ARCH_CATEGORIES_MAP[arch]:
+            ARCH_CATEGORIES_MAP[arch].append(cat)
 
 except Exception as e:
     print(f"FATAL: Could not load model configuration from YAML. Error: {e}")
     ALL_FILE_DOWNLOAD_MAP = {}
     MODEL_MAP_CHECKPOINT, ALL_MODEL_MAP = {}, {}
     MODEL_TYPE_MAP = {}
+    ARCH_CATEGORIES_MAP = {}
 
 
 try:
@@ -111,15 +189,17 @@ try:
     MAX_EMBEDDINGS = _constants.get('MAX_EMBEDDINGS', 5)
     MAX_CONDITIONINGS = _constants.get('MAX_CONDITIONINGS', 10)
     MAX_CONTROLNETS = _constants.get('MAX_CONTROLNETS', 5)
-    MAX_REFERENCE_LATENTS = _constants.get('MAX_REFERENCE_LATENTS', 10)
+    MAX_IPADAPTERS = _constants.get('MAX_IPADAPTERS', 5)
     LORA_SOURCE_CHOICES = _constants.get('LORA_SOURCE_CHOICES', ["Civitai", "File"])
     RESOLUTION_MAP = _constants.get('RESOLUTION_MAP', {})
+    ARCHITECTURES_CONFIG = load_architectures_config()
+    FEATURES_CONFIG = load_features_config()
+    MODEL_DEFAULTS_CONFIG = load_model_defaults()
 except Exception as e:
     print(f"FATAL: Could not load constants from YAML. Error: {e}")
-    MAX_LORAS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_CONTROLNETS = 5, 5, 10, 5
-    MAX_REFERENCE_LATENTS = 10
+    MAX_LORAS, MAX_EMBEDDINGS, MAX_CONDITIONINGS, MAX_CONTROLNETS, MAX_IPADAPTERS = 5, 5, 10, 5, 5
     LORA_SOURCE_CHOICES = ["Civitai", "File"]
     RESOLUTION_MAP = {}
-
-
-    DEFAULT_NEGATIVE_PROMPT = ""
+    ARCHITECTURES_CONFIG = {}
+    FEATURES_CONFIG = {}
+    MODEL_DEFAULTS_CONFIG = {}
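
The rewritten `load_models_from_yaml` implies a nested `model_list.yaml` schema: category → architecture → `{latent_type, models[]}`, where each model carries a `display_name`, either a `path` or a `components` map, and an optional `category`, and is stored as a 5-tuple `(repo_id, path_or_components, arch, latent_type, category)`. A hypothetical entry this loop would accept, shown as the parsed dict; every file and model name below is invented for illustration:

```python
# Hypothetical model_list.yaml content (as the dict yaml.safe_load would
# return) that the new load_models_from_yaml() parser accepts.
model_data = {
    "Checkpoints": {                    # matched via category_map_names
        "sdxl": {                       # arch key; presumably also names
                                        # conditioning/{{ model_type }}.yaml
            "latent_type": "latent",    # default -> txt2img_latent.yaml
            "models": [
                {"display_name": "Example XL",
                 "path": "example_xl.safetensors",  # repo_id looked up in
                 "category": "anime"},              # the download map
            ],
        },
        "flux2": {
            "latent_type": "flux2_latent",
            "models": [
                {"display_name": "Example Flux 2",
                 "components": {"unet": "example_unet.safetensors",
                                "vae": "example_vae.safetensors"}},
            ],
        },
    },
}
# Each entry becomes (repo_id, path_or_components, arch, latent_type, category).
```
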
requirements.txt CHANGED
@@ -1,6 +1,6 @@
-comfyui-frontend-package==1.42.14
-comfyui-workflow-templates==0.9.59
-comfyui-embedded-docs==0.4.3
+comfyui-frontend-package==1.42.15
+comfyui-workflow-templates==0.9.66
+comfyui-embedded-docs==0.4.4
 torch==2.10.0
 torchsde
 torchvision==0.25.0
@@ -19,11 +19,11 @@ scipy
 tqdm
 psutil
 alembic
-SQLAlchemy>=2.0
+SQLAlchemy>=2.0.0
 filelock
 av>=14.2.0
 comfy-kitchen>=0.2.8
-comfy-aimdo>=0.2.12
+comfy-aimdo==0.3.0
 requests
 simpleeval>=1.0.0
 blake3
@@ -58,4 +58,5 @@ svglib
 trimesh[easy]
 yacs
 yapf
-onnxruntime-gpu
+onnxruntime-gpu
+diffusers