javier233455 committed on
Commit ee5d363 · verified · 1 Parent(s): bd3437a

Update app.py

Files changed (1):
  1. app.py +847 -269

app.py CHANGED
@@ -1,43 +1,57 @@
  import os
  import gc
  import gradio as gr
  import numpy as np
  import spaces
  import torch
  import random
  from PIL import Image
- from typing import Iterable
  from gradio.themes import Soft
  from gradio.themes.utils import colors, fonts, sizes
-
- colors.orange_red = colors.Color(
- name="orange_red",
- c50="#FFF0E5",
- c100="#FFE0CC",
- c200="#FFC299",
- c300="#FFA366",
- c400="#FF8533",
- c500="#FF4500",
- c600="#E63E00",
- c700="#CC3700",
- c800="#B33000",
- c900="#992900",
- c950="#802200",
  )

- class OrangeRedTheme(Soft):
  def __init__(
  self,
  *,
- primary_hue: colors.Color | str = colors.gray,
- secondary_hue: colors.Color | str = colors.orange_red,
- neutral_hue: colors.Color | str = colors.slate,
  text_size: sizes.Size | str = sizes.text_lg,
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
  ),
  font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
  ),
  ):
  super().__init__(
@@ -50,44 +64,42 @@ class OrangeRedTheme(Soft):
  )
  super().set(
  background_fill_primary="*primary_50",
- background_fill_primary_dark="*primary_900",
- body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
- body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
  button_primary_text_color="white",
- button_primary_text_color_hover="white",
  button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
  button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
- button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
- button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
- button_secondary_text_color="black",
- button_secondary_text_color_hover="white",
- button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
- button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
- button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
- button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
  slider_color="*secondary_500",
- slider_color_dark="*secondary_600",
  block_title_text_weight="600",
- block_border_width="3px",
  block_shadow="*shadow_drop_lg",
- button_primary_shadow="*shadow_drop_lg",
- button_large_padding="11px",
- color_accent_soft="*primary_100",
- block_label_background_fill="*primary_200",
  )

- orange_red_theme = OrangeRedTheme()

  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
- print("torch.__version__ =", torch.__version__)
- print("Using device:", device)
-
  from diffusers import FlowMatchEulerDiscreteScheduler
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
- from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

  dtype = torch.bfloat16

@@ -101,197 +113,407 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
  torch_dtype=dtype
  ).to(device)

- try:
- pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
- print("Flash Attention 3 Processor set successfully.")
- except Exception as e:
- print(f"Warning: Could not set FA3 processor: {e}")

  MAX_SEED = np.iinfo(np.int32).max

  ADAPTER_SPECS = {
- "Multiple-Angles": {
- "repo": "dx8152/Qwen-Edit-2509-Multiple-angles",
- "weights": "镜头转换.safetensors",
- "adapter_name": "multiple-angles"
  },
- "Photo-to-Anime": {
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
- "adapter_name": "photo-to-anime"
  },
- "Anime-V2": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Anime",
  "weights": "Qwen-Image-Edit-2511-Anime-2000.safetensors",
- "adapter_name": "anime-v2"
  },
- "Light-Migration": {
- "repo": "dx8152/Qwen-Edit-2509-Light-Migration",
- "weights": "参考色调.safetensors",
- "adapter_name": "light-migration"
  },
- "Upscaler": {
- "repo": "starsfriday/Qwen-Image-Edit-2511-Upscale2K",
- "weights": "qwen_image_edit_2511_upscale.safetensors",
- "adapter_name": "upscale-2k"
  },
- "Style-Transfer": {
- "repo": "zooeyy/Style-Transfer",
- "weights": "Style Transfer-Alpha-V0.1.safetensors",
- "adapter_name": "style-transfer"
  },
- "Manga-Tone": {
- "repo": "nappa114514/Qwen-Image-Edit-2509-Manga-Tone",
- "weights": "tone001.safetensors",
- "adapter_name": "manga-tone"
  },
- "Anything2Real": {
- "repo": "lrzjason/Anything2Real_2601",
- "weights": "anything2real_2601.safetensors",
- "adapter_name": "anything2real"
  },
- "Fal-Multiple-Angles": {
- "repo": "fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA",
- "weights": "qwen-image-edit-2511-multiple-angles-lora.safetensors",
- "adapter_name": "fal-multiple-angles"
  },
- "Polaroid-Photo": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Polaroid-Photo",
  "weights": "Qwen-Image-Edit-2511-Polaroid-Photo.safetensors",
- "adapter_name": "polaroid-photo"
  },
- "Unblur-Anything": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Unblur-Upscale",
  "weights": "Qwen-Image-Edit-Unblur-Upscale_15.safetensors",
- "adapter_name": "unblur-anything"
  },
- "Midnight-Noir-Eyes-Spotlight": {
- "repo": "prithivMLmods/Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight",
- "weights": "Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight.safetensors",
- "adapter_name": "midnight-noir-eyes-spotlight"
  },
- "Hyper-Realistic-Portrait": {
- "repo": "prithivMLmods/Qwen-Image-Edit-2511-Hyper-Realistic-Portrait",
- "weights": "HRP_20.safetensors",
- "adapter_name": "hyper-realistic-portrait"
- },
- "Ultra-Realistic-Portrait": {
- "repo": "prithivMLmods/Qwen-Image-Edit-2511-Ultra-Realistic-Portrait",
- "weights": "URP_20.safetensors",
- "adapter_name": "ultra-realistic-portrait"
- },
- "Pixar-Inspired-3D": {
- "repo": "prithivMLmods/Qwen-Image-Edit-2511-Pixar-Inspired-3D",
- "weights": "PI3_20.safetensors",
- "adapter_name": "pi3"
- },
- "Noir-Comic-Book": {
- "repo": "prithivMLmods/Qwen-Image-Edit-2511-Noir-Comic-Book-Panel",
- "weights": "Noir-Comic-Book-Panel_20.safetensors",
- "adapter_name": "ncb"
- },
- "Any-light": {
- "repo": "lilylilith/QIE-2511-MP-AnyLight",
- "weights": "QIE-2511-AnyLight_.safetensors",
- "adapter_name": "any-light"
- },
  }

  LOADED_ADAPTERS = set()

- def update_dimensions_on_upload(image):
- if image is None:
- return 1024, 1024
-
  original_width, original_height = image.size

  if original_width > original_height:
- new_width = 1024
  aspect_ratio = original_height / original_width
  new_height = int(new_width * aspect_ratio)
  else:
- new_height = 1024
  aspect_ratio = original_width / original_height
  new_width = int(new_height * aspect_ratio)
-
  new_width = (new_width // 8) * 8
  new_height = (new_height // 8) * 8

  return new_width, new_height

  @spaces.GPU
- def infer(
- images,
- prompt,
- lora_adapter,
- seed,
- randomize_seed,
- guidance_scale,
- steps,
  progress=gr.Progress(track_tqdm=True)
- ):
  gc.collect()
  torch.cuda.empty_cache()
-
- if not images:
- raise gr.Error("Please upload at least one image to edit.")
-
- pil_images = []
- if images is not None:
- for item in images:
- try:
- if isinstance(item, tuple) or isinstance(item, list):
- path_or_img = item[0]
- else:
- path_or_img = item
-
- if isinstance(path_or_img, str):
- pil_images.append(Image.open(path_or_img).convert("RGB"))
- elif isinstance(path_or_img, Image.Image):
- pil_images.append(path_or_img.convert("RGB"))
- else:
- pil_images.append(Image.open(path_or_img.name).convert("RGB"))
- except Exception as e:
- print(f"Skipping invalid image item: {e}")
- continue
-
- if not pil_images:
- raise gr.Error("Could not process uploaded images.")
-
  spec = ADAPTER_SPECS.get(lora_adapter)
  if not spec:
- raise gr.Error(f"Configuration not found for: {lora_adapter}")
-
  adapter_name = spec["adapter_name"]
-
  if adapter_name not in LOADED_ADAPTERS:
- print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
  try:
  pipe.load_lora_weights(
- spec["repo"],
- weight_name=spec["weights"],
  adapter_name=adapter_name
  )
  LOADED_ADAPTERS.add(adapter_name)
  except Exception as e:
- raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")
  else:
- print(f"--- Adapter {lora_adapter} is already loaded. ---")
-
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
-
  if randomize_seed:
  seed = random.randint(0, MAX_SEED)
-
  generator = torch.Generator(device=device).manual_seed(seed)
- negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
-
- width, height = update_dimensions_on_upload(pil_images[0])
-
  try:
  result_image = pipe(
- image=pil_images,
- prompt=prompt,
  negative_prompt=negative_prompt,
  height=height,
  width=width,
@@ -300,117 +522,473 @@ def infer(
  true_cfg_scale=guidance_scale,
  ).images[0]

- return result_image, seed
-
  except Exception as e:
- raise e
  finally:
  gc.collect()
  torch.cuda.empty_cache()

  @spaces.GPU
- def infer_example(images, prompt, lora_adapter):
- if not images:
- return None, 0

- if isinstance(images, str):
- images_list = [images]
- else:
- images_list = images

- result, seed = infer(
- images=images_list,
- prompt=prompt,
- lora_adapter=lora_adapter,
- seed=0,
- randomize_seed=True,
- guidance_scale=1.0,
- steps=4
- )
- return result, seed
-
- css="""
- #col-container {
- margin: 0 auto;
- max-width: 1000px;
- }
- #main-title h1 {font-size: 2.3em !important;}
  """

- with gr.Blocks() as demo:
- with gr.Column(elem_id="col-container"):
- gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
- gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters. Upload one or more images.")
-
- with gr.Row(equal_height=True):
- with gr.Column():
- images = gr.Gallery(
- label="Upload Images",
- type="filepath",
- columns=2,
- rows=1,
- height=300,
- allow_preview=True
  )

- prompt = gr.Text(
- label="Edit Prompt",
- show_label=True,
- placeholder="e.g., transform into anime..",
  )
-
- run_button = gr.Button("Edit Image", variant="primary")
-
- with gr.Column():
- output_image = gr.Image(label="Output Image", interactive=False, format="png", height=363)

- with gr.Row():
- lora_adapter = gr.Dropdown(
- label="Choose Editing Style",
- choices=list(ADAPTER_SPECS.keys()),
- value="Photo-to-Anime"
- )

- with gr.Accordion("Advanced Settings", open=False, visible=False):
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
- guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
- steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

- gr.Examples(
- examples=[
- [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime"],
- [["examples/HRP.jpg"], "Transform into a hyper-realistic face portrait.", "Hyper-Realistic-Portrait"],
- [["examples/A.jpeg"], "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
- [["examples/U.jpg"], "Upscale this picture to 4K resolution.", "Upscaler"],
- [["examples/L1.jpg", "examples/L2.jpg"], "Apply the lighting from image 2 to image 1.", "Any-light"],
- [["examples/PP1.jpg"], "cinematic polaroid with soft grain subtle vignette gentle lighting white frame handwritten photographed by hf preserving realistic texture and details", "Polaroid-Photo"],
- [["examples/Z1.jpg"], "Front-right quarter view.", "Fal-Multiple-Angles"],
- [["examples/PI.jpg"], "Transform it into Pixar-inspired 3D.", "Pixar-Inspired-3D"],
- [["examples/MT.jpg"], "Paint with manga tone.", "Manga-Tone"],
- [["examples/NCB.jpg"], "Transform into a noir comic book style.", "Noir-Comic-Book"],
- [["examples/URP.jpg"], "ultra-realistic portrait.", "Ultra-Realistic-Portrait"],
- [["examples/MN.jpg"], "Transform into Midnight Noir Eyes Spotlight.", "Midnight-Noir-Eyes-Spotlight"],
- [["examples/ST1.jpg", "examples/ST2.jpg"], "Convert Image 1 to the style of Image 2.", "Style-Transfer"],
- [["examples/R1.jpg"], "Change the picture to realistic photograph.", "Anything2Real"],
- [["examples/UA.jpeg"], "Unblur and upscale.", "Unblur-Anything"],
- [["examples/L1.jpg", "examples/L2.jpg"], "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration"],
- [["examples/P1.jpg"], "Transform into anime (while preserving the background and remaining elements maintaining realism and original details.)", "Anime-V2"],
- ],
- inputs=[images, prompt, lora_adapter],
- outputs=[output_image, seed],
- fn=infer_example,
- cache_examples=False,
- label="Examples"
  )
-
- gr.Markdown("[*](https://huggingface.co/spaces/prithivMLmods/Qwen-Image-Edit-2511-LoRAs-Fast)This is still an experimental Space for Qwen-Image-Edit-2511.")
-
- run_button.click(
- fn=infer,
- inputs=[images, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
- outputs=[output_image, seed]
- )

  if __name__ == "__main__":
- demo.queue(max_size=30).launch(css=css, theme=orange_red_theme, mcp_server=True, ssr_mode=False, show_error=True)

+ """
+ 🎨 Manga Colorizer Pro - Qwen-Image-Edit-2511
+ Coloriza manga/cómic usando IA con imágenes de referencia de estilo.
+ Soporta imágenes individuales y archivos ZIP.
+ """
+
  import os
  import gc
+ import io
+ import zipfile
+ import tempfile
+ import shutil
  import gradio as gr
  import numpy as np
  import spaces
  import torch
  import random
  from PIL import Image
+ from typing import Iterable, List, Optional, Tuple
  from gradio.themes import Soft
  from gradio.themes.utils import colors, fonts, sizes
+ from datetime import datetime
+
+ # ============================================
+ # TEMA PERSONALIZADO
+ # ============================================
+ colors.manga_purple = colors.Color(
+ name="manga_purple",
+ c50="#F5F3FF",
+ c100="#EDE9FE",
+ c200="#DDD6FE",
+ c300="#C4B5FD",
+ c400="#A78BFA",
+ c500="#8B5CF6",
+ c600="#7C3AED",
+ c700="#6D28D9",
+ c800="#5B21B6",
+ c900="#4C1D95",
+ c950="#3B0764",
  )

+ class MangaColorizerTheme(Soft):
  def __init__(
  self,
  *,
+ primary_hue: colors.Color | str = colors.slate,
+ secondary_hue: colors.Color | str = colors.manga_purple,
+ neutral_hue: colors.Color | str = colors.gray,
  text_size: sizes.Size | str = sizes.text_lg,
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
+ fonts.GoogleFont("Inter"), "Arial", "sans-serif",
  ),
  font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
+ fonts.GoogleFont("JetBrains Mono"), "ui-monospace", "monospace",
  ),
  ):
  super().__init__(

  )
  super().set(
  background_fill_primary="*primary_50",
+ background_fill_primary_dark="*primary_950",
+ body_background_fill="linear-gradient(135deg, *secondary_100, *primary_100)",
+ body_background_fill_dark="linear-gradient(135deg, *primary_950, *secondary_950)",
  button_primary_text_color="white",
  button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
  button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
  slider_color="*secondary_500",
  block_title_text_weight="600",
+ block_border_width="2px",
  block_shadow="*shadow_drop_lg",
+ button_large_padding="12px 24px",
  )

+ manga_theme = MangaColorizerTheme()

+ # ============================================
+ # CONFIGURACIÓN DE DISPOSITIVO
+ # ============================================
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"🖥️ CUDA_VISIBLE_DEVICES = {os.environ.get('CUDA_VISIBLE_DEVICES')}")
+ print(f"🔧 torch.__version__ = {torch.__version__}")
+ print(f"⚡ Using device: {device}")

+ # ============================================
+ # CARGA DEL MODELO
+ # ============================================
  from diffusers import FlowMatchEulerDiscreteScheduler
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
+
+ try:
+ from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+ FA3_AVAILABLE = True
+ except ImportError:
+ FA3_AVAILABLE = False
+ print("⚠️ Flash Attention 3 not available, using default attention")

  dtype = torch.bfloat16

  torch_dtype=dtype
  ).to(device)

+ if FA3_AVAILABLE:
+ try:
+ pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+ print("✅ Flash Attention 3 Processor set successfully.")
+ except Exception as e:
+ print(f"⚠️ Could not set FA3 processor: {e}")

  MAX_SEED = np.iinfo(np.int32).max

+ # ============================================
+ # CATÁLOGO COMPLETO DE LoRAs
+ # ============================================
  ADAPTER_SPECS = {
+ # === COLORIZACIÓN Y ESTILO ===
+ "🎨 Style-Transfer": {
+ "repo": "zooeyy/Style-Transfer",
+ "weights": "Style Transfer-Alpha-V0.1.safetensors",
+ "adapter_name": "style-transfer",
+ "description": "Transfiere el estilo de color de una imagen de referencia",
+ "prompt_template": "Apply the color style and palette from the reference image to colorize this manga panel with vibrant colors while preserving line art details.",
+ "category": "colorization"
+ },
+ "💡 Light-Migration": {
+ "repo": "dx8152/Qwen-Edit-2509-Light-Migration",
+ "weights": "参考色调.safetensors",
+ "adapter_name": "light-migration",
+ "description": "Migra la iluminación y tonos de color de referencia",
+ "prompt_template": "Refer to the color tone from the reference image, apply similar lighting and color grading to colorize this manga with natural shading.",
+ "category": "colorization"
+ },
+ "🌟 Any-Light": {
+ "repo": "lilylilith/QIE-2511-MP-AnyLight",
+ "weights": "QIE-2511-AnyLight_.safetensors",
+ "adapter_name": "any-light",
+ "description": "Aplica iluminación personalizada basada en referencia",
+ "prompt_template": "Apply the lighting from the reference image to illuminate and colorize this manga panel with dramatic lighting effects.",
+ "category": "colorization"
  },
+ "🖼️ Anything2Real": {
+ "repo": "lrzjason/Anything2Real_2601",
+ "weights": "anything2real_2601.safetensors",
+ "adapter_name": "anything2real",
+ "description": "Convierte ilustraciones a estilo realista con colores naturales",
+ "prompt_template": "Transform this manga into a realistic photograph style with natural colors, realistic skin tones, and detailed texturing.",
+ "category": "colorization"
+ },
+
+ # === ANIME Y MANGA ===
+ "🌸 Photo-to-Anime": {
  "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
  "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
+ "adapter_name": "photo-to-anime",
+ "description": "Convierte a estilo anime con colores vibrantes",
+ "prompt_template": "Transform into vibrant anime style with beautiful colors, cel shading, and anime aesthetics.",
+ "category": "anime"
  },
+ "🎌 Anime-V2": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Anime",
  "weights": "Qwen-Image-Edit-2511-Anime-2000.safetensors",
+ "adapter_name": "anime-v2",
+ "description": "Anime cel shading de alta calidad",
+ "prompt_template": "Transform into high-quality anime with flat cel shading, vibrant colors, and clean line art while preserving original details.",
+ "category": "anime"
  },
+ "📰 Manga-Tone": {
+ "repo": "nappa114514/Qwen-Image-Edit-2509-Manga-Tone",
+ "weights": "tone001.safetensors",
+ "adapter_name": "manga-tone",
+ "description": "Aplica tonos de manga (screentones)",
+ "prompt_template": "Apply manga screentone patterns and shading techniques to enhance this manga panel.",
+ "category": "manga"
  },
+
+ # === RETRATOS Y REALISMO ===
+ "👤 Hyper-Realistic-Portrait": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Hyper-Realistic-Portrait",
+ "weights": "HRP_20.safetensors",
+ "adapter_name": "hyper-realistic-portrait",
+ "description": "Retratos hiper-realistas con colores de piel naturales",
+ "prompt_template": "Transform into a hyper-realistic portrait with natural skin colors, detailed textures, and photorealistic lighting.",
+ "category": "portrait"
  },
+ "✨ Ultra-Realistic-Portrait": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Ultra-Realistic-Portrait",
+ "weights": "URP_20.safetensors",
+ "adapter_name": "ultra-realistic-portrait",
+ "description": "Retratos ultra-realistas de alta fidelidad",
+ "prompt_template": "Transform into an ultra-realistic glamour portrait with natural colors while preserving identity.",
+ "category": "portrait"
  },
+
+ # === ESTILOS ARTÍSTICOS ===
+ "🎬 Pixar-Inspired-3D": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Pixar-Inspired-3D",
+ "weights": "PI3_20.safetensors",
+ "adapter_name": "pi3",
+ "description": "Estilo 3D inspirado en Pixar con colores vibrantes",
+ "prompt_template": "Transform into Pixar-inspired 3D style with vibrant colors, smooth shading, and cartoon aesthetics.",
+ "category": "artistic"
  },
+ "🖤 Noir-Comic-Book": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Noir-Comic-Book-Panel",
+ "weights": "Noir-Comic-Book-Panel_20.safetensors",
+ "adapter_name": "ncb",
+ "description": "Estilo cómic noir con colores dramáticos",
+ "prompt_template": "Transform into noir comic book style with dramatic shadows, limited color palette, and high contrast.",
+ "category": "artistic"
  },
+ "🌙 Midnight-Noir-Eyes-Spotlight": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight",
+ "weights": "Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight.safetensors",
+ "adapter_name": "midnight-noir-eyes-spotlight",
+ "description": "Estilo noir con énfasis en ojos",
+ "prompt_template": "Transform into Midnight Noir style with spotlight on eyes, dramatic dark tones and moody atmosphere.",
+ "category": "artistic"
  },
+ "📷 Polaroid-Photo": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Polaroid-Photo",
  "weights": "Qwen-Image-Edit-2511-Polaroid-Photo.safetensors",
+ "adapter_name": "polaroid-photo",
+ "description": "Estilo foto polaroid vintage",
+ "prompt_template": "Cinematic polaroid with soft grain, subtle vignette, gentle lighting, and vintage color tones.",
+ "category": "artistic"
+ },
+
+ # === MEJORA DE CALIDAD ===
+ "🔍 Upscaler": {
+ "repo": "starsfriday/Qwen-Image-Edit-2511-Upscale2K",
+ "weights": "qwen_image_edit_2511_upscale.safetensors",
+ "adapter_name": "upscale-2k",
+ "description": "Mejora resolución a 2K/4K",
+ "prompt_template": "Upscale this image to high resolution while preserving all details and enhancing clarity.",
+ "category": "enhancement"
  },
+ "🔎 Unblur-Anything": {
  "repo": "prithivMLmods/Qwen-Image-Edit-2511-Unblur-Upscale",
  "weights": "Qwen-Image-Edit-Unblur-Upscale_15.safetensors",
+ "adapter_name": "unblur-anything",
+ "description": "Elimina blur y mejora nitidez",
+ "prompt_template": "Unblur and upscale this image, restore fine details and enhance sharpness.",
+ "category": "enhancement"
  },
+
+ # === CÁMARA Y ÁNGULOS ===
+ "📐 Multiple-Angles": {
+ "repo": "dx8152/Qwen-Edit-2509-Multiple-angles",
+ "weights": "镜头转换.safetensors",
+ "adapter_name": "multiple-angles",
+ "description": "Genera diferentes ángulos de cámara",
+ "prompt_template": "Generate this scene from a different camera angle while maintaining color consistency.",
+ "category": "camera"
+ },
+ "🎥 Fal-Multiple-Angles": {
+ "repo": "fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA",
+ "weights": "qwen-image-edit-2511-multiple-angles-lora.safetensors",
+ "adapter_name": "fal-multiple-angles",
+ "description": "Múltiples ángulos de cámara (versión Fal)",
+ "prompt_template": "Generate different viewing angle of this scene while preserving colors and details.",
+ "category": "camera"
+ },
+
+ # === MANIPULACIÓN DE OBJETOS ===
+ "➕ Object-Adder": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Object-Adder",
+ "weights": "Qwen-Image-Edit-2511-Object-Adder.safetensors",
+ "adapter_name": "object-adder",
+ "description": "Añade objetos preservando el estilo",
+ "prompt_template": "Add the specified objects to the image while preserving the color scheme and artistic style.",
+ "category": "manipulation"
+ },
+ "➖ Object-Remover": {
+ "repo": "prithivMLmods/Qwen-Image-Edit-2511-Object-Remover",
+ "weights": "Qwen-Image-Edit-2511-Object-Remover.safetensors",
+ "adapter_name": "object-remover",
+ "description": "Elimina objetos de forma limpia",
+ "prompt_template": "Remove the specified objects from the image while preserving the background and maintaining color consistency.",
+ "category": "manipulation"
  },
  }

+ # LoRAs cargadas en memoria
  LOADED_ADAPTERS = set()

+ # Formatos de imagen soportados
+ SUPPORTED_IMAGE_FORMATS = {'.jpg', '.jpeg', '.png', '.webp', '.bmp', '.gif', '.tiff', '.tif'}
+
+ # ============================================
+ # FUNCIONES AUXILIARES
+ # ============================================
+
+ def get_adapters_by_category(category: str = None) -> list:
+ """Obtiene adaptadores filtrados por categoría"""
+ if category is None:
+ return list(ADAPTER_SPECS.keys())
+ return [name for name, spec in ADAPTER_SPECS.items() if spec.get("category") == category]
+
+ def get_colorization_adapters() -> list:
+ """Obtiene solo los adaptadores útiles para colorización"""
+ colorization_categories = ["colorization", "anime", "artistic", "portrait"]
+ return [name for name, spec in ADAPTER_SPECS.items() if spec.get("category") in colorization_categories]
+
+ def calculate_dimensions(image: Image.Image, max_size: int = 1024) -> Tuple[int, int]:
+ """Calcula dimensiones óptimas manteniendo aspect ratio"""
  original_width, original_height = image.size

  if original_width > original_height:
+ new_width = max_size
  aspect_ratio = original_height / original_width
  new_height = int(new_width * aspect_ratio)
  else:
+ new_height = max_size
  aspect_ratio = original_width / original_height
  new_width = int(new_height * aspect_ratio)
+
+ # Asegurar divisibilidad por 8
  new_width = (new_width // 8) * 8
  new_height = (new_height // 8) * 8

  return new_width, new_height

+ def is_valid_image(filename: str) -> bool:
+ """Verifica si el archivo es una imagen válida"""
+ ext = os.path.splitext(filename.lower())[1]
+ return ext in SUPPORTED_IMAGE_FORMATS
+
+ def load_image_safe(path_or_file) -> Optional[Image.Image]:
+ """Carga una imagen de forma segura"""
+ try:
+ if isinstance(path_or_file, str):
+ return Image.open(path_or_file).convert("RGB")
+ elif isinstance(path_or_file, Image.Image):
+ return path_or_file.convert("RGB")
+ elif hasattr(path_or_file, 'name'):
+ return Image.open(path_or_file.name).convert("RGB")
+ elif isinstance(path_or_file, (tuple, list)):
+ return load_image_safe(path_or_file[0])
+ return None
+ except Exception as e:
+ print(f"⚠️ Error loading image: {e}")
+ return None
+
+
357
+ def extract_images_from_zip(zip_path: str) -> List[Image.Image]:
358
+ """Extrae imágenes válidas de un archivo ZIP"""
359
+ images = []
360
+ try:
361
+ with zipfile.ZipFile(zip_path, 'r') as zip_ref:
362
+ for filename in sorted(zip_ref.namelist()):
363
+ if is_valid_image(filename) and not filename.startswith('__MACOSX'):
364
+ try:
365
+ with zip_ref.open(filename) as img_file:
366
+ img_data = io.BytesIO(img_file.read())
367
+ img = Image.open(img_data).convert("RGB")
368
+ images.append((filename, img))
369
+ except Exception as e:
370
+ print(f"⚠️ Error extracting {filename}: {e}")
371
+ except Exception as e:
372
+ print(f"❌ Error opening ZIP: {e}")
373
+ return images
374
+
375
+ def create_output_zip(images: List[Tuple[str, Image.Image]], prefix: str = "colorized") -> str:
376
+ """Crea un ZIP con las imágenes coloreadas"""
377
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
378
+ output_path = os.path.join(tempfile.gettempdir(), f"{prefix}_{timestamp}.zip")
379
+
380
+ with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
381
+ for filename, img in images:
382
+ img_buffer = io.BytesIO()
383
+ # Determinar formato de salida
384
+ base_name = os.path.splitext(filename)[0]
385
+ img.save(img_buffer, format='PNG', quality=95)
386
+ img_buffer.seek(0)
387
+ zipf.writestr(f"{base_name}_colorized.png", img_buffer.read())
388
+
389
+ return output_path
390
+
391
+ def build_colorization_prompt(
392
+ base_prompt: str,
393
+ style_adapter: str,
394
+ num_references: int,
395
+ custom_details: str = ""
396
+ ) -> str:
397
+ """Construye un prompt optimizado para colorización"""
398
+ spec = ADAPTER_SPECS.get(style_adapter, {})
399
+ template = spec.get("prompt_template", "")
400
+
401
+ prompt_parts = []
402
+
403
+ # Prompt base del adaptador
404
+ if template:
405
+ prompt_parts.append(template)
406
+
407
+ # Prompt del usuario
408
+ if base_prompt:
409
+ prompt_parts.append(base_prompt)
410
+
411
+ # Referencia a imágenes de estilo
412
+ if num_references > 0:
413
+ if num_references == 1:
414
+ prompt_parts.append("Use the style reference image as a guide for colors and shading.")
415
+ else:
416
+ prompt_parts.append(f"Analyze the {num_references} reference images for color palette, shading style, and artistic approach.")
417
+
418
+ # Detalles adicionales
419
+ if custom_details:
420
+ prompt_parts.append(custom_details)
421
+
422
+ # Instrucciones de calidad
423
+ prompt_parts.append("Maintain high detail, preserve line art, and ensure color consistency throughout.")
424
+
425
+ return " ".join(prompt_parts)
426
+
427
+ # ============================================
428
+ # FUNCIONES DE INFERENCIA
429
+ # ============================================
430
+
  @spaces.GPU
+ def colorize_single_image(
+ manga_images,
+ reference_images,
+ prompt: str,
+ lora_adapter: str,
+ custom_details: str,
+ seed: int,
+ randomize_seed: bool,
+ guidance_scale: float,
+ steps: int,
  progress=gr.Progress(track_tqdm=True)
+ ) -> Tuple[Image.Image, int, str]:
+ """Coloriza una imagen de manga usando referencias de estilo"""
  gc.collect()
  torch.cuda.empty_cache()
+
+ # Validar imágenes de manga
+ if not manga_images:
+ raise gr.Error("❌ Por favor sube al menos una imagen de manga para colorear.")
+
+ # Cargar imágenes de manga
+ pil_manga = []
+ if manga_images is not None:
+ for item in manga_images:
+ img = load_image_safe(item)
+ if img:
+ pil_manga.append(img)
+
+ if not pil_manga:
+ raise gr.Error("❌ No se pudieron procesar las imágenes de manga.")
+
+ # Cargar imágenes de referencia
+ pil_references = []
+ if reference_images is not None:
+ for item in reference_images:
+ img = load_image_safe(item)
+ if img:
+ pil_references.append(img)
+
+ # Combinar imágenes: manga + referencias
+ all_images = pil_manga + pil_references
+
+ # Cargar adaptador
  spec = ADAPTER_SPECS.get(lora_adapter)
  if not spec:
+ raise gr.Error(f"❌ Adaptador no encontrado: {lora_adapter}")
+
  adapter_name = spec["adapter_name"]
+
  if adapter_name not in LOADED_ADAPTERS:
+ print(f"📥 Cargando adaptador: {lora_adapter}")
  try:
  pipe.load_lora_weights(
+ spec["repo"],
+ weight_name=spec["weights"],
  adapter_name=adapter_name
  )
  LOADED_ADAPTERS.add(adapter_name)
  except Exception as e:
+ raise gr.Error(f"Error cargando adaptador {lora_adapter}: {e}")
  else:
+ print(f"Adaptador {lora_adapter} ya está cargado")
+
  pipe.set_adapters([adapter_name], adapter_weights=[1.0])
+
+ # Seed
  if randomize_seed:
  seed = random.randint(0, MAX_SEED)
+
  generator = torch.Generator(device=device).manual_seed(seed)
+
+ # Construir prompt
+ full_prompt = build_colorization_prompt(
+ prompt, lora_adapter, len(pil_references), custom_details
+ )
+
+ negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, grayscale, monochrome, black and white, desaturated"
+
+ # Calcular dimensiones
+ width, height = calculate_dimensions(pil_manga[0])
+
  try:
  result_image = pipe(
+ image=all_images,
+ prompt=full_prompt,
  negative_prompt=negative_prompt,
  height=height,
  width=width,

  true_cfg_scale=guidance_scale,
  ).images[0]

+ return result_image, seed, f"✅ Colorización completada | Seed: {seed}"
+
  except Exception as e:
+ raise gr.Error(f"❌ Error durante la colorización: {e}")
  finally:
  gc.collect()
  torch.cuda.empty_cache()

  @spaces.GPU
+ def colorize_batch_from_zip(
+ zip_file,
+ reference_images,
+ prompt: str,
+ lora_adapter: str,
+ custom_details: str,
+ seed: int,
+ randomize_seed: bool,
+ guidance_scale: float,
+ steps: int,
+ progress=gr.Progress(track_tqdm=True)
+ ) -> Tuple[str, List[Image.Image], str]:
+ """Procesa un ZIP de imágenes de manga"""
+ gc.collect()
+ torch.cuda.empty_cache()

+ if zip_file is None:
+ raise gr.Error("❌ Por favor sube un archivo ZIP con imágenes de manga.")
+
+ # Extraer imágenes del ZIP
+ zip_path = zip_file.name if hasattr(zip_file, 'name') else zip_file
+ manga_images = extract_images_from_zip(zip_path)
+
+ if not manga_images:
+ raise gr.Error("❌ No se encontraron imágenes válidas en el ZIP.")
+
+ # Cargar referencias
+ pil_references = []
+ if reference_images is not None:
+ for item in reference_images:
+ img = load_image_safe(item)
+ if img:
+ pil_references.append(img)
+
+ # Cargar adaptador
+ spec = ADAPTER_SPECS.get(lora_adapter)
+ if not spec:
+ raise gr.Error(f"❌ Adaptador no encontrado: {lora_adapter}")
+
+ adapter_name = spec["adapter_name"]
+
+ if adapter_name not in LOADED_ADAPTERS:
+ print(f"📥 Cargando adaptador: {lora_adapter}")
+ pipe.load_lora_weights(
+ spec["repo"],
+ weight_name=spec["weights"],
+ adapter_name=adapter_name
+ )
+ LOADED_ADAPTERS.add(adapter_name)
+
+ pipe.set_adapters([adapter_name], adapter_weights=[1.0])
+
+ # Procesar cada imagen
+ colorized_images = []
+ total = len(manga_images)
+
+ for idx, (filename, manga_img) in enumerate(manga_images):
+ progress((idx + 1) / total, f"Procesando {idx + 1}/{total}: {filename}")

+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else seed + idx
+ generator = torch.Generator(device=device).manual_seed(current_seed)
+
+ all_images = [manga_img] + pil_references
+
+ full_prompt = build_colorization_prompt(
+ prompt, lora_adapter, len(pil_references), custom_details
+ )
+
+ negative_prompt = "worst quality, low quality, grayscale, monochrome, black and white"
+
+ width, height = calculate_dimensions(manga_img)
+
+ try:
+ result = pipe(
+ image=all_images,
+ prompt=full_prompt,
+ negative_prompt=negative_prompt,
+ height=height,
+ width=width,
+ num_inference_steps=steps,
+ generator=generator,
+ true_cfg_scale=guidance_scale,
+ ).images[0]
+
+ colorized_images.append((filename, result))
+
+ except Exception as e:
+ print(f"⚠️ Error procesando {filename}: {e}")
+ continue
+
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ if not colorized_images:
+ raise gr.Error("❌ No se pudo colorizar ninguna imagen.")
+
+ # Crear ZIP de salida
+ output_zip = create_output_zip(colorized_images)
+
+ # Obtener previsualizaciones (máximo 6)
+ preview_images = [img for _, img in colorized_images[:6]]
+
+ return output_zip, preview_images, f"✅ {len(colorized_images)}/{total} imágenes colorizadas"
+
+ def update_prompt_template(adapter_name: str) -> str:
+ """Actualiza el placeholder del prompt según el adaptador"""
+ spec = ADAPTER_SPECS.get(adapter_name, {})
+ return spec.get("prompt_template", "Describe how you want to colorize the manga...")
+
+ def get_adapter_info(adapter_name: str) -> str:
+ """Obtiene información del adaptador"""
+ spec = ADAPTER_SPECS.get(adapter_name, {})
+ return spec.get("description", "Sin descripción disponible")
+
+ # ============================================
+ # INTERFAZ GRADIO
+ # ============================================
+
+ css = """
+ #main-container { max-width: 1400px; margin: 0 auto; }
+ #title { text-align: center; margin-bottom: 10px; }
+ #title h1 { font-size: 2.5em !important; background: linear-gradient(90deg, #8B5CF6, #EC4899); -webkit-background-clip: text; -webkit-text-fill-color: transparent; }
+ .info-box { background: linear-gradient(135deg, #1e1b4b, #312e81); padding: 15px; border-radius: 10px; margin: 10px 0; }
+ .tips-box { background: #fef3c7; padding: 10px; border-radius: 8px; border-left: 4px solid #f59e0b; }
  """

+ with gr.Blocks(css=css, theme=manga_theme) as demo:
+ with gr.Column(elem_id="main-container"):
+ # Header
+ gr.Markdown(
+ """
+ # 🎨 Manga Colorizer Pro
+ ### Coloriza manga y cómics con IA usando imágenes de referencia
+ """,
+ elem_id="title"
+ )
+
+ with gr.Tabs():
+ # ============================================
+ # TAB 1: IMAGEN INDIVIDUAL
+ # ============================================
+ with gr.TabItem("📷 Imagen Individual", id="single"):
+ with gr.Row(equal_height=False):
+ # Columna izquierda: Entradas
+ with gr.Column(scale=1):
+ manga_gallery = gr.Gallery(
+ label="🖼️ Manga a Colorear",
+ type="filepath",
+ columns=2,
+ rows=2,
+ height=250,
+ allow_preview=True,
+ elem_id="manga-input"
+ )
+
+ reference_gallery = gr.Gallery(
+ label="🎨 Imágenes de Referencia de Estilo (1-5)",
+ type="filepath",
+ columns=5,
+ rows=1,
+ height=120,
+ allow_preview=True,
+ elem_id="reference-input"
+ )
+
+ gr.Markdown(
+ """
+ <div class="tips-box">
+ 💡 <b>Tip:</b> Sube imágenes coloreadas del mismo manga/estilo como referencia para mejores resultados.
+ </div>
+ """
+ )
+
+ lora_dropdown = gr.Dropdown(
+ label="🎭 Estilo de Colorización",
+ choices=list(ADAPTER_SPECS.keys()),
+ value="🎨 Style-Transfer",
+ info="Selecciona el estilo de colorización"
+ )
+
+ adapter_info = gr.Markdown(
+ value=get_adapter_info("🎨 Style-Transfer"),
+ elem_id="adapter-info"
+ )
+
+ prompt_input = gr.Textbox(
+ label="✏️ Prompt de Colorización",
+ placeholder="Describe cómo quieres colorizar el manga...",
+ lines=3,
+ value=""
+ )
+
+ custom_details = gr.Textbox(
+ label="📝 Detalles Adicionales (opcional)",
+ placeholder="Ej: colores vibrantes, piel clara, cabello azul, fondo atardecer...",
+ lines=2
+ )
+
+ with gr.Accordion("⚙️ Configuración Avanzada", open=False):
+ seed_slider = gr.Slider(
+ label="Seed",
+ minimum=0,
+ maximum=MAX_SEED,
+ step=1,
+ value=0
+ )
+ randomize_seed = gr.Checkbox(
+ label="🎲 Randomizar Seed",
+ value=True
+ )
+ guidance_scale = gr.Slider(
+ label="Guidance Scale",
+ minimum=1.0,
+ maximum=10.0,
+ step=0.1,
+ value=4.0
+ )
+ steps_slider = gr.Slider(
+ label="Inference Steps",
+ minimum=1,
+ maximum=50,
+ step=1,
+ value=20
+ )
+
+ colorize_btn = gr.Button(
+ "🎨 Colorizar Manga",
+ variant="primary",
+ size="lg"
+ )
+
+ # Columna derecha: Salida
+ with gr.Column(scale=1):
+ output_image = gr.Image(
+ label="🖼️ Resultado Colorizado",
+ interactive=False,
+ format="png",
+ height=450
+ )
+
+ output_seed = gr.Number(
+ label="Seed Utilizado",
+ interactive=False
+ )
+
+ status_text = gr.Markdown(value="")
+
+ # Eventos
+ lora_dropdown.change(
+ fn=get_adapter_info,
+ inputs=[lora_dropdown],
+ outputs=[adapter_info]
  )

+ colorize_btn.click(
+ fn=colorize_single_image,
+ inputs=[
+ manga_gallery,
+ reference_gallery,
+ prompt_input,
+ lora_dropdown,
+ custom_details,
+ seed_slider,
+ randomize_seed,
+ guidance_scale,
+ steps_slider
+ ],
+ outputs=[output_image, output_seed, status_text]
  )
+
804
+ # ============================================
805
+ # TAB 2: PROCESAMIENTO BATCH (ZIP)
806
+ # ============================================
807
+ with gr.TabItem("📦 Batch (ZIP)", id="batch"):
808
+ with gr.Row(equal_height=False):
809
+ # Columna izquierda
810
+ with gr.Column(scale=1):
811
+ zip_input = gr.File(
812
+ label="📁 Archivo ZIP con Manga",
813
+ file_types=[".zip"],
814
+ type="filepath"
815
+ )
816
+
817
+ gr.Markdown(
818
+ """
819
+ <div class="tips-box">
820
+ 📦 <b>Formatos soportados:</b> JPG, JPEG, PNG, WebP, BMP, GIF, TIFF
821
+ </div>
822
+ """
823
+ )
824
+
825
+ batch_reference_gallery = gr.Gallery(
826
+ label="🎨 Imágenes de Referencia de Estilo",
827
+ type="filepath",
828
+ columns=5,
829
+ rows=1,
830
+ height=120
831
+ )
832
+
833
+ batch_lora_dropdown = gr.Dropdown(
834
+ label="🎭 Estilo de Colorización",
835
+ choices=list(ADAPTER_SPECS.keys()),
836
+ value="🎨 Style-Transfer"
837
+ )
838
+
839
+ batch_prompt = gr.Textbox(
840
+ label="✏️ Prompt de Colorización",
841
+ placeholder="Describe el estilo de colorización...",
842
+ lines=2
843
+ )
844
+
845
+ batch_details = gr.Textbox(
846
+ label="📝 Detalles Adicionales",
847
+ placeholder="Detalles específicos para todo el batch...",
848
+ lines=2
849
+ )
850
+
851
+ with gr.Accordion("⚙️ Configuración Avanzada", open=False):
852
+ batch_seed = gr.Slider(
853
+ label="Seed Base",
854
+ minimum=0,
855
+ maximum=MAX_SEED,
856
+ step=1,
857
+ value=0
858
+ )
859
+ batch_randomize = gr.Checkbox(
860
+ label="🎲 Randomizar Seed por imagen",
861
+ value=True
862
+ )
863
+ batch_guidance = gr.Slider(
864
+ label="Guidance Scale",
865
+ minimum=1.0,
866
+ maximum=10.0,
867
+ step=0.1,
868
+ value=4.0
869
+ )
870
+ batch_steps = gr.Slider(
871
+ label="Inference Steps",
872
+ minimum=1,
873
+ maximum=50,
874
+ step=1,
875
+ value=15
876
+ )
877
+
878
+ batch_btn = gr.Button(
879
+ "🎨 Procesar Batch",
880
+ variant="primary",
881
+ size="lg"
882
+ )
883
+
+ # Columna derecha
+ with gr.Column(scale=1):
+ output_zip = gr.File(
+ label="📥 Descargar ZIP Colorizado"
+ )
+
+ preview_gallery = gr.Gallery(
+ label="👁️ Vista Previa (primeras 6)",
+ columns=3,
+ rows=2,
+ height=300
+ )
+
+ batch_status = gr.Markdown(value="")

+ batch_btn.click(
+ fn=colorize_batch_from_zip,
+ inputs=[
+ zip_input,
+ batch_reference_gallery,
+ batch_prompt,
+ batch_lora_dropdown,
+ batch_details,
+ batch_seed,
+ batch_randomize,
+ batch_guidance,
+ batch_steps
+ ],
+ outputs=[output_zip, preview_gallery, batch_status]
+ )
+
+ # ============================================
+ # TAB 3: GUÍA DE USO
+ # ============================================
+ with gr.TabItem("📖 Guía", id="guide"):
+ gr.Markdown(
+ """
+ ## 🎨 Cómo Colorizar Manga con este Space
+
+ ### 1️⃣ Preparar Imágenes de Referencia
+ Para obtener los mejores resultados, sube **1-5 imágenes coloreadas** que muestren:
+ - El estilo de color que deseas (paleta de colores)
+ - El tipo de sombreado (cel shading, gradientes, etc.)
+ - Colores de personajes específicos (piel, cabello, ojos)
+
+ ### 2️⃣ Elegir el Adaptador Correcto
+
+ | Adaptador | Mejor Para |
+ |-----------|------------|
+ | 🎨 Style-Transfer | Transferir paleta de colores exacta |
+ | 💡 Light-Migration | Copiar iluminación y tonos |
+ | 🌟 Any-Light | Efectos de luz dramáticos |
+ | 🌸 Photo-to-Anime | Estilo anime vibrante |
+ | 🖼️ Anything2Real | Colores realistas |
+
+ ### 3️⃣ Escribir un Buen Prompt
+
+ **Ejemplo de prompt efectivo:**
+ ```
+ Colorize this manga panel with vibrant anime colors.
+ Character has pale skin, bright blue eyes, and silver hair.
+ Background should be warm sunset tones with orange and pink.
+ Use soft cel shading with highlights.
+ ```
+
+ ### 4️⃣ Consejos Pro
+
+ - 🎯 **Más referencias = Mejor consistencia** de color
+ - 🔄 Si el resultado no es bueno, prueba otro seed
+ - ⚡ Para batches grandes, usa menos steps (10-15)
+ - 🎨 Combina Style-Transfer + referencias del mismo manga
+
+ ### 5️⃣ Formatos Soportados
+
+ **Imágenes:** JPG, JPEG, PNG, WebP, BMP, GIF, TIFF
+ **Archivos:** ZIP (para procesamiento batch)
+
+ ---
+
+ ## 📚 Lista Completa de Adaptadores
+ """
+ )

+ # Mostrar todos los adaptadores
968
+ adapter_md = ""
969
+ for name, spec in ADAPTER_SPECS.items():
970
+ adapter_md += f"- **{name}**: {spec['description']}\n"
971
+
972
+ gr.Markdown(adapter_md)
973
 
974
+ # Footer
975
+ gr.Markdown(
976
+ """
977
+ ---
978
+ <center>
979
+ 🎨 Manga Colorizer Pro | Powered by Qwen-Image-Edit-2511 |
980
+ <a href="https://huggingface.co/Qwen/Qwen-Image-Edit-2511" target="_blank">Modelo Base</a> |
981
+ <a href="https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511" target="_blank">LoRAs Disponibles</a>
982
+ </center>
983
+ """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
984
  )
 
 
 
 
 
 
 
 
985
 
986
+ # ============================================
987
+ # EJECUCIÓN
988
+ # ============================================
989
  if __name__ == "__main__":
990
+ demo.queue(max_size=20).launch(
991
+ share=False,
992
+ show_error=True,
993
+ ssr_mode=False
994
+ )
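
For reference, a minimal sketch of the ZIP round trip that the new "📦 Batch (ZIP)" tab relies on, mirroring the extract_images_from_zip and create_output_zip helpers added in this commit but with the diffusion call stubbed out so the packaging logic can be exercised on CPU. This is an illustrative, standalone approximation, not part of the commit: read_zip, write_zip, and manga_pages.zip are hypothetical names, and only Pillow plus the standard library are assumed.

```python
import io
import os
import tempfile
import zipfile
from datetime import datetime
from typing import List, Tuple

from PIL import Image

# Same extensions the Space accepts for batch input.
SUPPORTED = {".jpg", ".jpeg", ".png", ".webp", ".bmp", ".gif", ".tiff", ".tif"}

def read_zip(zip_path: str) -> List[Tuple[str, Image.Image]]:
    """Collect (filename, RGB image) pairs from a ZIP, skipping macOS metadata entries."""
    images = []
    with zipfile.ZipFile(zip_path, "r") as zf:
        for name in sorted(zf.namelist()):
            ext = os.path.splitext(name.lower())[1]
            if ext in SUPPORTED and not name.startswith("__MACOSX"):
                with zf.open(name) as fh:
                    images.append((name, Image.open(io.BytesIO(fh.read())).convert("RGB")))
    return images

def write_zip(images: List[Tuple[str, Image.Image]], prefix: str = "colorized") -> str:
    """Write results into a timestamped ZIP of PNGs, as the Space's create_output_zip does."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_path = os.path.join(tempfile.gettempdir(), f"{prefix}_{stamp}.zip")
    with zipfile.ZipFile(out_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for name, img in images:
            buf = io.BytesIO()
            img.save(buf, format="PNG")
            zf.writestr(f"{os.path.splitext(name)[0]}_colorized.png", buf.getvalue())
    return out_path

if __name__ == "__main__":
    # Stand-in for the diffusion step: pass pages through unchanged.
    pages = read_zip("manga_pages.zip")  # hypothetical input archive
    print(write_zip(pages))
```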