# (scrape residue — dataset-viewer column header, not code)
# text stringlengths 1 93.6k
img1 = Image.open(self.list_A[index]).convert('RGB')
img2 = Image.open(self.list_B[index]).convert('RGB')
# labl = Image.open(self.list_L[index]).convert('P')
if self.model == "SC":
labl = self.list_L[index]
return self.transform(img1), self.transform(img2), labl, name
else:
labl = Image.open(self.list_L[index]).convert('P')
return self.transform(img1), self.transform(img2), self.transforml(labl), name
def __len__(self):
    """Dataset size: one sample per entry in the label list."""
    total = len(self.list_L)
    return total
# Configure dataloaders
def Get_dataloader(path, batch, reshape_size, model=None):
    """Build the training DataLoader over paired images plus labels.

    Images are resized, converted to tensors, and normalized from
    [0, 1] into [-1, 1] per channel; labels are only resized and
    converted to tensors.
    """
    image_pipeline = [
        transforms.Resize(reshape_size),  # Image.BICUBIC
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    label_pipeline = [
        transforms.Resize(reshape_size),
        transforms.ToTensor(),
    ]
    dataset = ImageDataset(
        path,
        transforms_=image_pipeline,
        transforms_L=label_pipeline,
        model=model,
    )
    # Training: shuffle and drop the ragged final batch.
    return DataLoader(
        dataset,
        batch_size=batch,
        shuffle=True,
        num_workers=2,
        drop_last=True,
    )
def Get_dataloader_test(path, batch, reshape_size=None, model=None):
    """Build the evaluation DataLoader over paired images plus labels.

    Same transform stack as `Get_dataloader`, but deterministic order
    (no shuffling) and the final partial batch is kept.
    NOTE(review): the `reshape_size=None` default would make
    `transforms.Resize(None)` fail — callers presumably always pass a
    size; confirm.
    """
    image_pipeline = [
        transforms.Resize(reshape_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    label_pipeline = [
        transforms.Resize(reshape_size),
        transforms.ToTensor(),
    ]
    dataset = ImageDataset_test(
        path,
        transforms_=image_pipeline,
        transforms_L=label_pipeline,
        model=model,
    )
    # Evaluation: keep input order and every sample.
    return DataLoader(
        dataset,
        batch_size=batch,
        shuffle=False,
        num_workers=2,
        drop_last=False,
    )
# <FILESEP>
import inspect
from typing import List, Optional, Union
import torch
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from torch import autocast
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
PNDMScheduler,
UNet2DConditionModel,
LMSDiscreteScheduler,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
class StableDiffusionPipeline(DiffusionPipeline):
def __init__(
    self,
    vae: AutoencoderKL,
    text_encoder: CLIPTextModel,
    tokenizer: CLIPTokenizer,
    unet: UNet2DConditionModel,
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
    safety_checker: StableDiffusionSafetyChecker,
    feature_extractor: CLIPFeatureExtractor,
):
    """Register all sub-models of the pipeline on `self`.

    NOTE(review): `scheduler.set_format("pt")` is the legacy diffusers
    API for selecting torch-tensor outputs; confirm the pinned
    diffusers version still exposes it.
    """
    super().__init__()
    # Switch the scheduler to PyTorch tensor outputs before registering.
    scheduler = scheduler.set_format("pt")
    components = dict(
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        scheduler=scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
    )
    self.register_modules(**components)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
height: Optional[int] = 512,
width: Optional[int] = 512,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 7.5,
eta: Optional[float] = 0.0,
generator: Optional[torch.Generator] = None,
torch_device: Optional[Union[str, torch.device]] = None,