| | from daam import trace, set_seed
|
| | from diffusers import DiffusionPipeline
|
| | from matplotlib import pyplot as plt
|
| | import torch
|
| | import os
|
| |
|
| |
|
# Fail fast: this script drives an fp16 SDXL pipeline and needs a CUDA device.
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. Please ensure a GPU is available and PyTorch is installed with CUDA support.")
|
# Directory that collects the generated image and the per-word heatmaps.
output_dir = 'sdxl'
os.makedirs(output_dir, exist_ok=True)
|
# SDXL base checkpoint to load and the target device for inference.
model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
device = 'cuda'
|
# Load the SDXL base pipeline in half precision from its fp16 safetensors
# variant (smaller download, lower VRAM use).
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    use_safetensors=True,
    variant='fp16',
    torch_dtype=torch.float16,
)
|
# Memory-saving configuration:
#  - enable_model_cpu_offload() moves each sub-model to the GPU only while it
#    runs and back to CPU afterwards; it manages device placement itself.
#  - enable_vae_slicing() decodes latents in slices to cut peak VRAM use.
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
# BUG FIX: the original also ran `pipe = pipe.to(device)` here. Moving the
# whole pipeline to CUDA after enabling model CPU offload conflicts with the
# accelerate offload hooks (diffusers rejects/warns on .to() once offloading
# is active and the offload behavior breaks), so that call is removed —
# offloading handles device placement on its own.
|
# Prompt to render and a seeded generator (daam.set_seed) so the sampling
# is reproducible across runs.
prompt = 'A human holding his hand up'
gen = set_seed(42)
|
# Run inference under DAAM's trace so cross-attention maps are recorded;
# tc.time_callback is invoked once per denoising step (callback_steps=1).
# NOTE(review): `callback`/`callback_steps` are deprecated in newer diffusers
# releases in favor of `callback_on_step_end` — confirm against the installed
# diffusers/daam versions.
with torch.no_grad(), trace(pipe) as tc:
    out = pipe(
        prompt,
        num_inference_steps=15,
        generator=gen,
        callback=tc.time_callback,
        callback_steps=1
    )

# Persist the generated image inside the output directory.
generated_image_path = os.path.join(output_dir, 'generated_image.png')
out.images[0].save(generated_image_path)
|
# Aggregate the traced attention into one global heat map, then render and
# save an overlay figure for each whitespace-separated word of the prompt.
heat_map = tc.compute_global_heat_map()
for word in prompt.split():
    wmap = heat_map.compute_word_heat_map(word)

    overlay_fig = plt.figure()
    wmap.plot_overlay(out.images[0])
    plt.title(f"Heatmap for '{word}'")

    plt.savefig(os.path.join(output_dir, f'heatmap_{word}.png'), bbox_inches='tight')
    plt.close(overlay_fig)
|
# Persist the full DAAM experiment, then report where everything was written.
tc.to_experiment('sdxl-cat-experiment-gpu').save()

print(f"Generation complete! Images saved in '{output_dir}' folder:")
print(f"- Generated image: {generated_image_path}")
print(f"- Heatmaps: {output_dir}/heatmap_<word>.png")
print("Experiment saved in 'sdxl-cat-experiment-gpu'.")