import numpy as np
from torchvision import transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
import PIL
import random
import os
import matplotlib.pyplot as plt
import pandas as pd
import math
import webdataset as wds
import tempfile
from torchvision.utils import make_grid

import json
from torchmetrics.image.fid import FrechetInceptionDistance
from PIL import Image
import requests
import io
import time

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def is_interactive():
    # True when running in a notebook/REPL (no __main__.__file__)
    import __main__ as main
    return not hasattr(main, '__file__')

def seed_everything(seed=0, cudnn_deterministic=True):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if cudnn_deterministic:
        torch.backends.cudnn.deterministic = True
    else:
        print('Note: not using cudnn.deterministic')

def np_to_Image(x):
    # (C, H, W) float array in [-1, 1] -> PIL image; batches use the first item
    if x.ndim == 4:
        x = x[0]
    return PIL.Image.fromarray((x.transpose(1, 2, 0) * 127.5 + 128).clip(0, 255).astype('uint8'))

def torch_to_Image(x):
    # (C, H, W) float tensor in [0, 1] -> PIL image; batches use the first item
    if x.ndim == 4:
        x = x[0]
    return transforms.ToPILImage()(x)

def Image_to_torch(x):
    # PIL image (or a list of frames) -> (1, 3, H, W) tensor scaled to [-1, 1]
    try:
        x = (transforms.ToTensor()(x)[:3].unsqueeze(0) - .5) / .5
    except Exception:
        x = (transforms.ToTensor()(x[0])[:3].unsqueeze(0) - .5) / .5
    return x

def torch_to_matplotlib(x, device=device):
    # heuristic: a mean > 10 implies the tensor is already in [0, 255]
    if torch.mean(x) > 10:
        x = (x.permute(0, 2, 3, 1)).clamp(0, 255).to(torch.uint8)
    else:
        x = (x.permute(0, 2, 3, 1) * 255).clamp(0, 255).to(torch.uint8)
    if str(device) == 'cpu':
        return x[0]
    else:
        return x.cpu().numpy()[0]

def pairwise_cosine_similarity(A, B, dim=1, eps=1e-8):
    # (n, d) x (m, d) -> (n, m) cosine-similarity matrix
    numerator = A @ B.T
    A_l2 = torch.mul(A, A).sum(axis=dim)
    B_l2 = torch.mul(B, B).sum(axis=dim)
    denominator = torch.clamp(torch.sqrt(torch.outer(A_l2, B_l2)), min=eps)
    return torch.div(numerator, denominator)

def batchwise_pearson_correlation(Z, B):
    # center each row, then form the (n, m) matrix of Pearson correlations
    Z_mean = torch.mean(Z, dim=1, keepdim=True)
    B_mean = torch.mean(B, dim=1, keepdim=True)

    Z_centered = Z - Z_mean
    B_centered = B - B_mean

    numerator = Z_centered @ B_centered.T
    Z_centered_norm = torch.linalg.norm(Z_centered, dim=1, keepdim=True)
    B_centered_norm = torch.linalg.norm(B_centered, dim=1, keepdim=True)
    denominator = Z_centered_norm @ B_centered_norm.T

    pearson_correlation = (numerator / denominator)
    return pearson_correlation

def batchwise_cosine_similarity(Z, B):
    Z = Z.flatten(1)
    B = B.flatten(1).T
    Z_norm = torch.linalg.norm(Z, dim=1, keepdim=True)  # (n, 1)
    B_norm = torch.linalg.norm(B, dim=0, keepdim=True)  # (1, m)
    cosine_similarity = ((Z @ B) / (Z_norm @ B_norm)).T
    return cosine_similarity

def prenormed_batchwise_cosine_similarity(Z, B):
    # assumes Z and B are already L2-normalized
    return (Z @ B.T).T

def cosine_similarity(Z, B, l=0):
    Z = nn.functional.normalize(Z, p=2, dim=1)
    B = nn.functional.normalize(B, p=2, dim=1)
    # l > 0 shifts each set of embeddings away from its batch mean before the dot product
    Z = Z - l * torch.mean(Z, dim=0)
    B = B - l * torch.mean(B, dim=0)
    cosine_similarity = (Z @ B.T).T
    return cosine_similarity

def topk(similarities, labels, k=5):
    # fraction of rows whose true label appears among the top-k most similar columns
    if k > similarities.shape[0]:
        k = similarities.shape[0]
    topsum = 0
    for i in range(k):
        topsum += torch.sum(torch.argsort(similarities, axis=1)[:, -(i + 1)] == labels) / len(labels)
    return topsum

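# Hedged usage sketch (not called anywhere): retrieval evaluation with random
# stand-in embeddings in place of real model outputs. The diagonal is the
# correct pairing, so `labels` is simply arange(batch).
def _demo_retrieval_topk():
    brain = nn.functional.normalize(torch.randn(16, 768), dim=-1)
    clip_embs = nn.functional.normalize(torch.randn(16, 768), dim=-1)
    sims = batchwise_cosine_similarity(brain, clip_embs)  # (16, 16)
    labels = torch.arange(len(brain))
    fwd = topk(sims, labels, k=1)    # top-1 retrieval accuracy, one direction
    bwd = topk(sims.T, labels, k=1)  # top-1 retrieval accuracy, other direction
    return fwd, bwd
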
def get_non_diagonals(a):
    a = torch.triu(a, diagonal=1) + torch.tril(a, diagonal=-1)
    # set the diagonal to -1 so self-similarities sort last
    a = a.fill_diagonal_(-1)
    return a

def gather_features(image_features, voxel_features, accelerator):
    # gather features across distributed processes (HuggingFace Accelerate)
    all_image_features = accelerator.gather(image_features.contiguous())
    if voxel_features is not None:
        all_voxel_features = accelerator.gather(voxel_features.contiguous())
        return all_image_features, all_voxel_features
    return all_image_features

def soft_clip_loss(preds, targs, temp=0.125):
    # soft targets: the CLIP-space similarity structure of the targets themselves
    clip_clip = (targs @ targs.T) / temp
    brain_clip = (preds @ targs.T) / temp

    # symmetric soft cross-entropy (brain -> clip and clip -> brain)
    loss1 = -(brain_clip.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()
    loss2 = -(brain_clip.T.log_softmax(-1) * clip_clip.softmax(-1)).sum(-1).mean()

    loss = (loss1 + loss2) / 2
    return loss

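# Hedged sketch: soft_clip_loss assumes L2-normalized inputs so the matrix
# products above are cosine similarities; these random tensors are placeholders.
def _demo_soft_clip_loss():
    preds = nn.functional.normalize(torch.randn(8, 512), dim=-1)
    targs = nn.functional.normalize(torch.randn(8, 512), dim=-1)
    return soft_clip_loss(preds, targs, temp=0.125)
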
def soft_siglip_loss(preds, targs, temp, bias):
    temp = torch.exp(temp)

    logits = (preds @ targs.T) * temp + bias
    # labels: +1 on the diagonal (for normalized targs), targ-targ similarity minus 1 elsewhere
    labels = (targs @ targs.T) - 1 + (torch.eye(len(targs)).to(targs.dtype).to(targs.device))

    loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels[:len(preds)])) / len(preds)
    loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels[:, :len(preds)])) / len(preds)
    loss = (loss1 + loss2) / 2
    return loss

def mixco_hard_siglip_loss(preds, targs, temp, bias, perm, betas):
    temp = torch.exp(temp)

    # mixing probabilities: beta on the diagonal, 1 - beta at the permuted index
    probs = torch.diag(betas)
    probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas

    logits = (preds @ targs.T) * temp + bias
    labels = probs * 2 - 1

    loss1 = -torch.sum(nn.functional.logsigmoid(logits * labels)) / len(preds)
    loss2 = -torch.sum(nn.functional.logsigmoid(logits.T * labels)) / len(preds)
    loss = (loss1 + loss2) / 2
    return loss

def mixco(voxels, beta=0.15, s_thresh=0.5, perm=None, betas=None, select=None):
    # MixCo augmentation: convexly mix a random subset of samples with a
    # shuffled copy of the batch, keeping (perm, betas, select) for the loss
    if perm is None:
        perm = torch.randperm(voxels.shape[0])
    voxels_shuffle = voxels[perm].to(voxels.device, dtype=voxels.dtype)
    if betas is None:
        betas = torch.distributions.Beta(beta, beta).sample([voxels.shape[0]]).to(voxels.device, dtype=voxels.dtype)
    if select is None:
        select = (torch.rand(voxels.shape[0]) <= s_thresh).to(voxels.device)
    betas_shape = [-1] + [1] * (len(voxels.shape) - 1)
    voxels[select] = voxels[select] * betas[select].reshape(*betas_shape) + \
        voxels_shuffle[select] * (1 - betas[select]).reshape(*betas_shape)
    betas[~select] = 1
    return voxels, perm, betas, select

def mixco_clip_target(clip_target, perm, select, betas):
    # apply the same mixing to the CLIP targets so pairs stay aligned
    clip_target_shuffle = clip_target[perm]
    clip_target[select] = clip_target[select] * betas[select].reshape(-1, 1) + \
        clip_target_shuffle[select] * (1 - betas[select]).reshape(-1, 1)
    return clip_target

def mixco_nce(preds, targs, temp=0.1, perm=None, betas=None, select=None, distributed=False,
              accelerator=None, local_rank=None, bidirectional=True):
    brain_clip = (preds @ targs.T) / temp

    if perm is not None and betas is not None and select is not None:
        # soft targets derived from the MixCo mixing coefficients
        probs = torch.diag(betas)
        probs[torch.arange(preds.shape[0]).to(preds.device), perm] = 1 - betas

        loss = -(brain_clip.log_softmax(-1) * probs).sum(-1).mean()
        if bidirectional:
            loss2 = -(brain_clip.T.log_softmax(-1) * probs.T).sum(-1).mean()
            loss = (loss + loss2) / 2
        return loss
    else:
        # plain InfoNCE with hard diagonal targets
        loss = F.cross_entropy(brain_clip, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
        if bidirectional:
            loss2 = F.cross_entropy(brain_clip.T, torch.arange(brain_clip.shape[0]).to(brain_clip.device))
            loss = (loss + loss2) / 2
        return loss

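# Hedged sketch of one MixCo-augmented contrastive step. The normalized random
# `preds` stand in for an encoder forward pass on the mixed voxels, and the
# temperature is an arbitrary demo value; only the mixco/mixco_nce plumbing
# itself comes from this module.
def _demo_mixco_step():
    voxels = torch.randn(8, 1024)
    targs = nn.functional.normalize(torch.randn(8, 512), dim=-1)
    voxels, perm, betas, select = mixco(voxels)
    preds = nn.functional.normalize(torch.randn(8, 512), dim=-1)  # placeholder for model(voxels)
    return mixco_nce(preds, targs, temp=0.006, perm=perm, betas=betas, select=select)
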
def count_params(model):
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('param counts:\n{:,} total\n{:,} trainable'.format(total, trainable))
    return trainable

def image_grid(imgs, rows, cols):
    w, h = imgs[0].size
    grid = PIL.Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

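# Hedged usage sketch: tile PIL images (e.g. reconstructions converted with
# torch_to_Image) into a single contact sheet; random tensors as stand-ins.
def _demo_image_grid():
    imgs = [torch_to_Image(torch.rand(3, 64, 64)) for _ in range(4)]
    return image_grid(imgs, rows=2, cols=2)
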
def check_loss(loss):
    if loss.isnan().any():
        raise ValueError('NaN loss')

def cosine_anneal(start, end, steps):
    # tensor of `steps` values following a half-cosine from `start` to `end`
    return end + (start - end) / 2 * (1 + torch.cos(torch.pi * torch.arange(steps) / (steps - 1)))

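# Hedged usage sketch: e.g. a per-epoch weight that decays from 1 to 0 over
# 240 epochs; sched[0] is 1.0 and sched[-1] is 0.0.
def _demo_cosine_anneal():
    sched = cosine_anneal(1.0, 0.0, 240)
    return sched[0], sched[-1]
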
def resize(img, img_size=128):
    if img.ndim == 3:
        img = img[None]
    return nn.functional.interpolate(img, size=(img_size, img_size), mode='nearest')

import braceexpand

def get_dataloaders(
    batch_size,
    image_var='images',
    num_devices=None,
    num_workers=None,
    train_url=None,
    val_url=None,
    meta_url=None,
    num_train=None,
    num_val=None,
    cache_dir="/scratch/tmp/wds-cache",
    seed=0,
    voxels_key="nsdgeneral.npy",
    val_batch_size=None,
    to_tuple=["voxels", "images", "trial"],
    local_rank=0,
    world_size=1,
):
    print("Getting dataloaders...")
    assert image_var == 'images'

    def my_split_by_node(urls):
        # no-op splitter: every node sees every shard
        return urls

    train_url = list(braceexpand.braceexpand(train_url))
    val_url = list(braceexpand.braceexpand(val_url))

    if num_devices is None:
        num_devices = torch.cuda.device_count()

    if num_workers is None:
        num_workers = num_devices

    if num_train is None or num_val is None:
        with open(meta_url) as f:
            metadata = json.load(f)
        if num_train is None:
            num_train = metadata['totals']['train']
        if num_val is None:
            num_val = metadata['totals']['val']

    if val_batch_size is None:
        val_batch_size = batch_size

    global_batch_size = batch_size * num_devices
    num_batches = math.floor(num_train / global_batch_size)
    num_worker_batches = math.floor(num_batches / num_workers)
    if num_worker_batches == 0:
        num_worker_batches = 1

    print("\nnum_train", num_train)
    print("global_batch_size", global_batch_size)
    print("batch_size", batch_size)
    print("num_workers", num_workers)
    print("num_batches", num_batches)
    print("num_worker_batches", num_worker_batches)

    train_data = wds.WebDataset(train_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
        .shuffle(500, initial=500, rng=random.Random(42))\
        .decode("torch")\
        .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
        .to_tuple(*to_tuple)
    # webdataset handles shuffling and sharding itself, so the DataLoader
    # stays single-worker and unshuffled
    train_dl = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=1, shuffle=False)

    print("val_batch_size", val_batch_size)
    val_data = wds.WebDataset(val_url, resampled=False, cache_dir=cache_dir, nodesplitter=my_split_by_node)\
        .shuffle(500, initial=500, rng=random.Random(42))\
        .decode("torch")\
        .rename(images="jpg;png", voxels=voxels_key, trial="trial.npy", coco="coco73k.npy", reps="num_uniques.npy")\
        .to_tuple(*to_tuple)
    val_dl = torch.utils.data.DataLoader(val_data, batch_size=val_batch_size, num_workers=1, shuffle=False, drop_last=True)

    return train_dl, val_dl, num_train, num_val

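# Hedged usage sketch for get_dataloaders; the shard URLs and metadata path
# below are placeholders, not real paths from this repo.
# train_dl, val_dl, num_train, num_val = get_dataloaders(
#     batch_size=32,
#     train_url="/data/train/train_subj01_{0..17}.tar",
#     val_url="/data/val/val_subj01_0.tar",
#     meta_url="/data/metadata_subj01.json",
#     voxels_key="nsdgeneral.npy",
# )
# for voxels, images, trial in train_dl:
#     ...
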
pixcorr_preprocess = transforms.Compose([
    transforms.Resize(425, interpolation=transforms.InterpolationMode.BILINEAR),
])

def pixcorr(images, brains, nan=True):
    # mean pixelwise Pearson correlation between paired images and reconstructions
    all_images_flattened = pixcorr_preprocess(images).reshape(len(images), -1)
    all_brain_recons_flattened = pixcorr_preprocess(brains).reshape(len(brains), -1)
    if nan:
        corrmean = torch.nanmean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
    else:
        corrmean = torch.mean(torch.diag(batchwise_pearson_correlation(all_images_flattened, all_brain_recons_flattened)))
    return corrmean

def select_annotations(annots, random=True):
    """
    There are 5 annotations per image. Select one of them for each image.
    Note: `random` is a bool flag here, shadowing the random module locally.
    """
    for i, b in enumerate(annots):
        t = ''
        if random:
            # choose a random non-empty annotation
            while t == '':
                rand = torch.randint(5, (1, 1))[0][0]
                t = b[rand]
        else:
            # choose the first non-empty annotation
            for j in range(5):
                if b[j] != '':
                    t = b[j]
                    break
        if i == 0:
            txt = np.array(t)
        else:
            txt = np.vstack((txt, t))
    txt = txt.flatten()
    return txt

def add_saturation(image, alpha=2):
    # ITU-R 601 luma as the grayscale baseline, then push colors away from it
    gray_image = 0.2989 * image[:, 0, :, :] + 0.5870 * image[:, 1, :, :] + 0.1140 * image[:, 2, :, :]
    gray_image = gray_image.unsqueeze(1).expand_as(image)
    saturated_image = alpha * image + (1 - alpha) * gray_image
    return torch.clamp(saturated_image, 0, 1)

def find_prompt_by_image_number(image_number, data):
    target_image_filename = f"img_t{image_number}.jpg"
    for entry in data:
        if 'target' in entry and entry['target'].endswith(target_image_filename):
            return entry['prompt']
    return -1

def compute_negative_l1_losses(preds, targets):
    # mean L1 distance between every prediction and every *non-matching* target
    batch_size = preds.size(0)

    # (B, 1, D) vs (1, B, D) -> (B, B, D) pairwise absolute differences
    expanded_preds = preds.unsqueeze(1)
    expanded_targets = targets.unsqueeze(0)
    l1_diffs = torch.abs(expanded_preds - expanded_targets)

    # zero out the matched (diagonal) pairs
    mask = torch.eye(batch_size).bool().to(l1_diffs.device)
    l1_diffs[mask] = 0

    negative_losses = l1_diffs.sum(dim=-1).mean()
    return negative_losses

def unclip_recon(x, diffusion_engine, vector_suffix,
                 num_samples=1, offset_noise_level=0.04):
    from generative_models.sgm.util import append_dims
    assert x.ndim == 3
    if x.shape[0] == 1:
        x = x[[0]]
    with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.float16), diffusion_engine.ema_scope():
        z = torch.randn(num_samples, 4, 96, 96).to(device)

        # conditioning: CLIP tokens for cross-attention plus the vector suffix
        tokens = x
        c = {"crossattn": tokens.repeat(num_samples, 1, 1), "vector": vector_suffix.repeat(num_samples, 1)}

        # unconditional branch uses random tokens
        tokens = torch.randn_like(x)
        uc = {"crossattn": tokens.repeat(num_samples, 1, 1), "vector": vector_suffix.repeat(num_samples, 1)}

        for k in c:
            c[k], uc[k] = map(lambda y: y[k][:num_samples].to(device), (c, uc))

        noise = torch.randn_like(z)
        sigmas = diffusion_engine.sampler.discretization(diffusion_engine.sampler.num_steps)
        sigma = sigmas[0].to(z.device)

        if offset_noise_level > 0.0:
            noise = noise + offset_noise_level * append_dims(
                torch.randn(z.shape[0], device=z.device), z.ndim
            )
        noised_z = z + noise * append_dims(sigma, z.ndim)
        noised_z = noised_z / torch.sqrt(
            1.0 + sigmas[0] ** 2.0
        )

        def denoiser(x, sigma, c):
            return diffusion_engine.denoiser(diffusion_engine.model, x, sigma, c)

        samples_z = diffusion_engine.sampler(denoiser, noised_z, cond=c, uc=uc)
        samples_x = diffusion_engine.decode_first_stage(samples_z)
        samples = torch.clamp((samples_x * .8 + .2), min=0.0, max=1.0)
        return samples

def soft_cont_loss(student_preds, teacher_preds, teacher_aug_preds, temp=0.125):
    teacher_teacher_aug = (teacher_preds @ teacher_aug_preds.T) / temp
    teacher_teacher_aug_t = (teacher_aug_preds @ teacher_preds.T) / temp
    student_teacher_aug = (student_preds @ teacher_aug_preds.T) / temp
    student_teacher_aug_t = (teacher_aug_preds @ student_preds.T) / temp

    # the student matches the teacher's (augmented) similarity structure, both directions
    loss1 = -(student_teacher_aug.log_softmax(-1) * teacher_teacher_aug.softmax(-1)).sum(-1).mean()
    loss2 = -(student_teacher_aug_t.log_softmax(-1) * teacher_teacher_aug_t.softmax(-1)).sum(-1).mean()

    loss = (loss1 + loss2) / 2
    return loss

def iterate_range(start, length, batchsize):
    # yield (range, size) chunks covering [start, start + length)
    batch_count = int(length // batchsize)
    residual = int(length % batchsize)
    for i in range(batch_count):
        yield range(start + i * batchsize, start + (i + 1) * batchsize), batchsize
    if residual > 0:
        yield range(start + batch_count * batchsize, start + length), residual

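# Hedged usage sketch: chunk 10 items into batches of 4, giving sizes 4, 4, 2.
def _demo_iterate_range():
    return [(list(r), n) for r, n in iterate_range(0, 10, 4)]
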
def get_value(_x):
    return np.copy(_x.data.cpu().numpy())

import pickle

def condition_average(x, y, cond, nest=False):
    # average (or stack, if nest=True) the trials of x that share a condition label
    idx, idx_count = np.unique(cond, return_counts=True)
    idx_list = [np.array(cond) == i for i in np.sort(idx)]
    if nest:
        avg_x = torch.zeros((len(idx), idx_count.max(), x.shape[1]), dtype=torch.float32)
    else:
        avg_x = torch.zeros((len(idx), 1, x.shape[1]), dtype=torch.float32)
    for i, m in enumerate(idx_list):
        if nest:
            avg_x[i] = x[m]
        else:
            avg_x[i] = torch.mean(x[m], axis=0)

    return avg_x, y, len(idx_count)

def load_nsd_mental_imagery(subject, mode, stimtype="all", average=False, nest=False):
    # trial metadata for the NSD mental-imagery sessions
    img_stim_file = "imagery/nsd_imagery/data/nsddata_stimuli/stimuli/nsdimagery_stimuli.pkl3"
    with open(img_stim_file, 'rb') as ex_file:
        imagery_dict = pickle.load(ex_file)
    # experiment label per trial (e.g. 'visA', 'imgB_1', ...)
    exps = imagery_dict['exps']
    # cue shown on each trial
    cues = imagery_dict['cues']
    # maps each cue to its stimulus image index
    image_map = imagery_dict['image_map']

    cond_idx = {
        'visionsimple': np.arange(len(exps))[exps == 'visA'],
        'visioncomplex': np.arange(len(exps))[exps == 'visB'],
        'visionconcepts': np.arange(len(exps))[exps == 'visC'],
        'visionall': np.arange(len(exps))[np.logical_or(np.logical_or(exps == 'visA', exps == 'visB'), exps == 'visC')],
        'imagerysimple': np.arange(len(exps))[np.logical_or(exps == 'imgA_1', exps == 'imgA_2')],
        'imagerycomplex': np.arange(len(exps))[np.logical_or(exps == 'imgB_1', exps == 'imgB_2')],
        'imageryconcepts': np.arange(len(exps))[np.logical_or(exps == 'imgC_1', exps == 'imgC_2')],
        'imageryall': np.arange(len(exps))[np.logical_or(
            np.logical_or(
                np.logical_or(exps == 'imgA_1', exps == 'imgA_2'),
                np.logical_or(exps == 'imgB_1', exps == 'imgB_2')),
            np.logical_or(exps == 'imgC_1', exps == 'imgC_2'))]}

    # preprocessed betas: (n_trials, n_voxels)
    x = torch.load("imagery/nsd_imagery/data/preprocessed_data/subject{}/nsd_imagery.pt".format(subject)).requires_grad_(False).to("cpu")

    cond_im_idx = {n: [image_map[c] for c in cues[idx]] for n, idx in cond_idx.items()}
    conditionals = cond_im_idx[mode + stimtype]

    # the 18 stimulus images: 6 simple, 6 complex, 6 concepts
    y = torch.load("imagery/nsd_imagery/data/nsddata_stimuli/stimuli/imagery_stimuli_18.pt").requires_grad_(False).to("cpu")

    x = x[cond_idx[mode + stimtype]]

    if stimtype == "simple":
        y = y[:6]
    elif stimtype == "complex":
        y = y[6:12]
    elif stimtype == "concepts":
        y = y[12:]

    if average or nest:
        x, y, sample_count = condition_average(x, y, conditionals, nest=nest)
    else:
        x = x.reshape((x.shape[0], 1, x.shape[1]))

    return x, y

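# Hedged usage sketch: subject-1 imagery betas averaged across repeats. The
# relative paths inside load_nsd_mental_imagery assume the repo's data layout.
# x, y = load_nsd_mental_imagery(subject=1, mode="imagery", stimtype="all", average=True)
# x: (n_conditions, 1, n_voxels) averaged betas; y: the 18 stimulus images
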
def bb_soft_clip_loss(preds, targs, temp=0.125):
    temp = np.exp(temp)
    clip_clip = (targs @ targs.T) / temp
    brain_brain = (preds @ preds.T) / temp

    # match the brain-brain similarity structure to the clip-clip structure
    loss = nn.functional.kl_div(brain_brain.log_softmax(-1), clip_clip.softmax(-1), reduction='batchmean')
    return loss

def bb_cossim_loss(preds, targs, temp=None):
    clip_clip = (targs @ targs.T)
    brain_brain = (preds @ preds.T)
    loss = 1 - nn.functional.cosine_similarity(brain_brain, clip_clip).mean()
    return loss

def load_images_to_numpy(folder_path):
    # load every png/jpg/jpeg in the folder as a stacked uint8 array,
    # resizing to 224x224 when the width is not already 224
    file_names = [f for f in os.listdir(folder_path) if f.endswith(('.png', '.jpg', '.jpeg'))]
    image_data = []
    image_names = []
    for file_name in file_names:
        image_path = os.path.join(folder_path, file_name)
        image_names.append(file_name)
        with Image.open(image_path) as img:
            img_array = np.array(img)
            if img_array.shape[1] != 224:
                img = img.resize((224, 224))
                img_array = np.array(img)
            image_data.append(img_array)
    images_np = np.stack(image_data, axis=0)
    return images_np, image_names

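# Hedged sketch tying load_images_to_numpy to the FrechetInceptionDistance
# import at the top; the folder paths are placeholders. torchmetrics' FID
# expects uint8 (N, 3, H, W) tensors when normalize=False (its default).
# real_np, _ = load_images_to_numpy("path/to/ground_truth_images")
# recon_np, _ = load_images_to_numpy("path/to/reconstructions")
# fid = FrechetInceptionDistance(feature=64)
# fid.update(torch.from_numpy(real_np).permute(0, 3, 1, 2), real=True)
# fid.update(torch.from_numpy(recon_np).permute(0, 3, 1, 2), real=False)
# print(fid.compute())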