This model is a finetuned Flux.2-Klein-4B model. The model has been quantized for use on the T4 in the Google Colab environment. Code on how to use this quantization is provided in this model card ⬇️.

image

From: https://civitai.red/models/2327389/flux2-klein-aio?modelVersionId=2618128 (the most popular Klein 4B finetune currently)

Use in Google Colab

# =============================================================================
#@markdown # **CELL 1**: Mount Drive + HF auth
# =============================================================================

# Colab-only helpers: Drive mounting and the Secrets panel (userdata).
from google.colab import drive, userdata
from huggingface_hub import login
import torch
import os
import gc
import shutil

# Mount Google Drive so the input zip (Cell 4) and the results zip can live there.
drive.mount('/content/drive')

# Read the Hugging Face token from Colab Secrets; login is skipped (with a
# warning) when no token is configured.
hf_token = userdata.get('HF_TOKEN')
if hf_token:
    login(token=hf_token)
else:
    print("⚠️ No HF_TOKEN found in secrets.")

# Fully remove any preinstalled diffusers before installing the latest git build.
# NOTE(review): the hard-coded python3.12 site-packages path assumes the current
# Colab runtime's Python version — confirm if the runtime image changes.
print("🧹 Removing old diffusers...")
!pip uninstall -y diffusers > /dev/null 2>&1
!rm -rf /usr/local/lib/python3.12/dist-packages/diffusers* ~/.cache/pip/*diffusers*

print("πŸ”„ Installing latest diffusers...")
!pip install -q git+https://github.com/huggingface/diffusers.git --force-reinstall --no-deps
!python -m pip cache purge

print("βœ… Cell 1 complete!")
#@title Set path to zip file on your drive , and set klein edit prompt
# Path to the zip of input images on Google Drive (Colab form field).
input_zip_path = '/content/drive/MyDrive/my_images.zip' #@param {type:'string'}
zip_path = input_zip_path
# Default edit instruction applied to every image (a per-image .txt prompt can
# override it in Cell 4 when use_txt_prompts is enabled).
# FIX: the #@param annotation was corrupted ("}Β₯{type:'string'}"), which breaks
# the Colab form widget for this field.
edit_prompt = 'make sure the bare skin against the  very dark background looks like a real photo. remove the background at the centre so that it has the same color as the corners. remove the corners. remove the background. the background is a single color. ' #@param {type:'string'}
# =============================================================================
#@markdown # **CELL 2**: Fixed settings (resolution + options)
# =============================================================================

# Output resolution preset (Colab dropdown).
resolution = '1024 x 1024 (Square)' #@param ["1024 x 1024 (Square)", "512 x 1024 (Portrait)", "768 x 1024 (Slight Portrait)", "1536 x 1024 (Landscape)", "2048 x 1024 (Wide Landscape)"] {type:"string"}
# When True, Cell 4 uses a matching <name>.txt file as the per-image prompt.
use_txt_prompts = False #@param {type:"boolean"}
debug = False #@param {type:"boolean"}

# Map the human-readable preset to (width, height) in pixels.
res_map = {
    "1024 x 1024 (Square)": (1024, 1024),
    "512 x 1024 (Portrait)": (512, 1024),
    "768 x 1024 (Slight Portrait)": (768, 1024),
    "1536 x 1024 (Landscape)": (1536, 1024),
    "2048 x 1024 (Wide Landscape)": (2048, 1024)
}
target_width, target_height = res_map[resolution]

# FIX: the status messages below were mojibake ("βœ…" -> "✅", "Γ—" -> "×")
# from a UTF-8/cp1252 round-trip; restored the intended glyphs.
print("✅ Cell 2 settings loaded")
print(f"   Resolution: {target_width}×{target_height}")
print(f"   Use .txt prompts: {use_txt_prompts}")
print(f"   Debug mode: {debug}")
print("\nNow run Cell 3 (model load), then Cell 4 (inference)")
# =============================================================================
#@markdown # **CELL 3**: Load YOUR UPDATED SDNQ MODEL FROM HF + OPTIMIZATIONS
# =============================================================================

import torch
import gc
import os
from diffusers import Flux2KleinPipeline

# Install the SDNQ quantization runtime before importing from it.
print("πŸ“¦ Installing SDNQ...")
!pip install -q sdnq

# use_torch_compile doubles here as a Triton-availability flag.
# NOTE(review): presumably True only when Triton kernels can be compiled —
# confirm against the sdnq package documentation.
from sdnq.common import use_torch_compile as triton_is_available
from sdnq.loader import apply_sdnq_options_to_model

gc.collect()
torch.cuda.empty_cache()

# =========================================================
# πŸ”₯ LOAD YOUR NEWLY UPLOADED MODEL FROM HF REPO
# =========================================================
MODEL_ID = "codeShare/FLUX.2-klein-AIO-SDNQ-4bit-dynamic"

print(f"πŸ”„ Loading model from: {MODEL_ID}")

# Load everything onto CPU first; enable_model_cpu_offload() below moves
# submodules to the GPU only while they are needed.
pipe = Flux2KleinPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    device_map="cpu"   # IMPORTANT: prevents VRAM spike at load
)

print("βœ… Base pipeline loaded (CPU-safe)")

# =========================================================
# πŸ”₯ APPLY SDNQ OPTIMIZATIONS (AFTER LOAD)
# =========================================================
print("πŸ”₯ Applying SDNQ optimizations...")

# Allow the CUDA caching allocator to grow segments instead of reserving
# large fixed blocks (helps avoid fragmentation OOMs on the T4).
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Quantized matmul kernels require both a CUDA device and Triton support.
if torch.cuda.is_available():
    if triton_is_available:
        pipe.transformer = apply_sdnq_options_to_model(
            pipe.transformer,
            use_quantized_matmul=True
        )
        pipe.text_encoder = apply_sdnq_options_to_model(
            pipe.text_encoder,
            use_quantized_matmul=True
        )
        print("   βœ… Quantized matmul enabled")

# =========================================================
# πŸ”₯ MEMORY OPTIMIZATION
# =========================================================
# Offload idle submodules to CPU and slice/tile VAE work so decoding
# large images fits in T4 VRAM.
pipe.enable_model_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()

print("βœ… Cell 3 complete – NEW MODEL READY")
print("VRAM usage:", torch.cuda.memory_allocated() / 1e9, "GB")
print("πŸ‘‰ Run Cell 4 now")
# =============================================================================
#@markdown # **CELL 4**: MULTI-IMAGE INFERENCE (Klein Edit with custom/gray reference)
# β€’ First image  = your original (the one being edited)
# β€’ Second image = custom uploaded OR solid #181818 gray square
# β€’ Re-run this cell only after changing settings
# =============================================================================
import zipfile
import os
import shutil
import glob  # Added: Import glob module
from PIL import Image, ImageDraw # Added: Import Image and ImageDraw for image manipulation
import torch
import gc
import datetime
from google.colab import files # Added: Import files for file upload

#edit_prompt = 'remove the background. the background is gray.' #@param {type:"string"}
#zip_path = '/content/drive/MyDrive/aiotest.zip' #@param {type:"string"}
upload_custom_reference = False #@param {type:"boolean"}
max_image_dimension = 2048 #@param {type:"slider", min:512, max:4096, step:256}
add_gray_corners = True #@param {type:"boolean"}
gray_square_size = 50 #@param {type:"slider", min:10, max:200, step:10}

output_folder = '/content/edited_images_multi'

# Wipe leftovers from any previous run, then recreate a fresh output folder.
print("🧹 Clearing old temporary folders...")
for stale_dir in ('/content/input_images', output_folder):
    if os.path.exists(stale_dir):
        shutil.rmtree(stale_dir)
os.makedirs(output_folder, exist_ok=True)

# ====================== Unzip + build prompt map ======================
print(f"πŸ“¦ Unzipping: {zip_path}...")
with zipfile.ZipFile(zip_path, 'r') as archive:
    archive.extractall('/content/input_images')

# Keep only recognized raster formats, in deterministic (sorted) order.
valid_exts = ('.png', '.jpg', '.jpeg', '.webp')
image_files = [
    p for p in sorted(glob.glob('/content/input_images/*.*'))
    if p.lower().endswith(valid_exts)
]
print(f"Found {len(image_files)} images.")

# Map each image path to its sidecar .txt prompt (or None → use the global
# edit_prompt later).
prompt_map = {}
txt_count = 0
for img_path in image_files:
    stem = os.path.splitext(os.path.basename(img_path))[0]
    sidecar = os.path.join('/content/input_images', f"{stem}.txt")
    if use_txt_prompts and os.path.exists(sidecar):
        with open(sidecar, 'r', encoding='utf-8') as fh:
            prompt_map[img_path] = fh.read().strip()
        txt_count += 1
    else:
        prompt_map[img_path] = None

print(f"Found {txt_count} matching .txt files")

# ====================== Create or load the second reference image ======================
# The Klein edit call is fed two images per input: the image being edited plus
# this shared reference (a user upload, or a solid gray canvas as fallback).
uploaded_reference_filepath = None
reference_image_to_pair = None

if upload_custom_reference:
    # Interactive Colab upload; blocks until the user picks a file.
    print("Please upload your reference image now.")
    uploaded_files = files.upload()
    if uploaded_files:
        # Only the first uploaded file is used as the reference.
        uploaded_reference_filepath = list(uploaded_files.keys())[0]
        print(f"βœ… Uploaded reference image: {uploaded_reference_filepath}")
        # convert("RGB") forces PIL to load the pixel data now, so the
        # temporary upload file can be deleted safely below.
        reference_image_to_pair = Image.open(uploaded_reference_filepath).convert("RGB")
else:
    # Fallback: solid #181818 gray canvas at the target output resolution.
    print("🚫 Custom reference disabled β†’ using solid #181818 gray square")
    reference_image_to_pair = Image.new("RGB", (target_width, target_height), "#181818")

# Clean up temporary upload file
if uploaded_reference_filepath and os.path.exists(uploaded_reference_filepath):
    os.remove(uploaded_reference_filepath)

# ==================== Helper: add gray corner squares ====================
def add_corner_squares(image: Image.Image, square_size: int, color=(24, 24, 24)) -> Image.Image:
    """Return a copy of *image* with a solid square of *color* in each corner."""
    out = image.copy()
    w, h = out.size
    painter = ImageDraw.Draw(out)
    corner_origins = [
        (0, 0),                              # top-left
        (w - square_size, 0),                # top-right
        (0, h - square_size),                # bottom-left
        (w - square_size, h - square_size),  # bottom-right
    ]
    for x0, y0 in corner_origins:
        painter.rectangle([(x0, y0), (x0 + square_size, y0 + square_size)], fill=color)
    return out
# ====================== BATCH INFERENCE ======================
print(f"\nπŸš€ Starting multi-image edit on {len(image_files)} images...")

for i, img_path in enumerate(image_files):
    filename = os.path.basename(img_path)
    # Free as much VRAM as possible before each image.
    gc.collect()
    torch.cuda.empty_cache()

    input_image = Image.open(img_path).convert("RGB")

    # Downscale oversized inputs while preserving aspect ratio.
    # FIX: the log messages and output path below contained the literal
    # placeholder "(unknown)" instead of the filename; with a constant output
    # name every result overwrote the previous one.
    if max(input_image.width, input_image.height) > max_image_dimension:
        print(f"   ⚠️ Resizing {filename} to max dimension {max_image_dimension}")
        aspect = input_image.width / input_image.height
        if input_image.width > input_image.height:
            new_w = max_image_dimension
            new_h = int(max_image_dimension / aspect)
        else:
            new_h = max_image_dimension
            new_w = int(max_image_dimension * aspect)
        input_image = input_image.resize((new_w, new_h), Image.LANCZOS)

    if add_gray_corners:
        input_image = add_corner_squares(input_image, gray_square_size)

    # Multi-image list for Klein Edit: first = the image being edited,
    # second = the custom/gray reference (when one exists).
    if reference_image_to_pair is not None:
        reference_images = [input_image, reference_image_to_pair]
        print(f"[{i+1}/{len(image_files)}] {filename}  β†’  + custom/gray reference")
    else:
        reference_images = [input_image]
        print(f"[{i+1}/{len(image_files)}] {filename}  β†’  (no reference)")

    # Per-image .txt prompt wins when enabled; otherwise the global edit prompt.
    current_prompt = prompt_map[img_path] if use_txt_prompts and prompt_map[img_path] is not None else edit_prompt

    result = pipe(
        prompt=current_prompt,
        image=reference_images,           # ← Klein multi-reference
        height=target_height,
        width=target_width,
        guidance_scale=1.0,
        num_inference_steps=4,
        generator=torch.Generator("cuda").manual_seed(42),  # fixed seed → reproducible edits
        output_type="pil",
    ).images[0]

    # Unique per-image output path (see FIX note above).
    output_path = os.path.join(output_folder, f"multi_edited_{filename}")
    result.save(output_path)

print("\n→ MULTI-IMAGE BATCH COMPLETE!")

# ====================== ZIP TO DRIVE ======================
# Timestamped archive name avoids clobbering earlier runs on Drive.
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
drive_zip_path = f"/content/drive/MyDrive/klein_edited_{timestamp}.zip"

print(f"πŸ“¦ Creating zip β†’ {drive_zip_path}")
shutil.make_archive(drive_zip_path.replace('.zip', ''), 'zip', output_folder)

print("βœ… ALL DONE!")
print(f"Zip saved to Google Drive: {drive_zip_path}")
# πŸ”Œ Auto Disconnect from Drive

Results from the earlier cell have been saved to your Drive as
'/content/drive/MyDrive/klein_edited_<timestamp>.zip'

You can disconnect from the session and reconnect to a new runtime to further process the edits.
Original Model
β€’  Official Base: black-forest-labs/FLUX.2-klein-4B
β€’  Reference Quantized Model: Disty0/FLUX.2-klein-4B-SDNQ-4bit-dynamic
License
Same as the original FLUX.2 models from Black Forest Labs.
Non-commercial research & personal use only.
See the original FLUX license for full details.
Downloads last month
275
Inference Providers NEW
This model isn't deployed by any Inference Provider. πŸ™‹ Ask for provider support

Model tree for codeShare/FLUX.2-klein-AIO-SDNQ-4bit-dynamic

Finetuned
(11)
this model