import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime

import matplotlib.pyplot as plt


IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def get_timestamp():
    return datetime.now().strftime('%y%m%d-%H%M%S')


def imshow(x, title=None, cbar=False, figsize=None):
    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()


'''
# =======================================
# get image paths from files
# =======================================
'''


def get_image_paths(dataroot):
    paths = None
    if dataroot is not None:
        paths = sorted(_get_paths_from_images(dataroot))
    return paths


def _get_paths_from_images(path):
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = []
    for dirpath, _, fnames in sorted(os.walk(path)):
        for fname in sorted(fnames):
            if is_image_file(fname):
                img_path = os.path.join(dirpath, fname)
                images.append(img_path)
    assert images, '{:s} has no valid image file'.format(path)
    return images


'''
# =======================================
# makedir
# =======================================
'''


def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def mkdirs(paths):
    if isinstance(paths, str):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)


def mkdir_and_rename(path):
    if os.path.exists(path):
        new_name = path + '_archived_' + get_timestamp()
        print('Path already exists. Renaming it to [{:s}]'.format(new_name))
        os.rename(path, new_name)
    os.makedirs(path)


'''
# =======================================
# read image from path
# Note: opencv is fast, but reads
# images as BGR numpy arrays
# =======================================
'''


def read_img(path):
    # read an image as a float32 numpy array in [0, 1], shape HxWxC, BGR order
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # drop the alpha channel if present
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img


def imread_uint(path, n_channels=3):
    # read a uint8 image of shape HxWxn_channels (RGB order for 3 channels)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # grayscale
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img


def imsave(img, img_path):
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
    cv2.imwrite(img_path, img)
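

# A minimal usage sketch (illustrative, not part of the original module;
# 'input.png' and 'copy.png' are placeholder paths):
def _demo_io():
    img = imread_uint('input.png', n_channels=3)  # HxWx3, RGB, uint8
    imsave(img, 'copy.png')  # imsave flips RGB back to BGR before writing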


'''
# =======================================
# numpy(single) <---> numpy(uint)
# numpy(single) <---> tensor
# numpy(uint)   <---> tensor
# =======================================
'''


def uint2single(img):
    return np.float32(img/255.)


def uint2single1(img):
    return np.float32(np.squeeze(img)/255.)


def single2uint(img):
    return np.uint8((img.clip(0, 1)*255.).round())


def uint162single(img):
    return np.float32(img/65535.)


def single2uint16(img):
    return np.uint16((img.clip(0, 1)*65535.).round())
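

# Round-trip sketch for the uint8 <-> single conversions (illustrative only):
def _demo_uint_single():
    img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
    assert np.array_equal(single2uint(uint2single(img)), img)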


def uint2tensor4(img, data_range):
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255./data_range).unsqueeze(0)


def uint2tensor3(img):
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)


def tensor2uint(img, data_range):
    img = img.data.squeeze().float().clamp_(0, data_range).cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))
    return np.uint8((img*255.0/data_range).round())


def single2tensor4(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)


def single2tensor3(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()


def tensor2single(img):
    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))
    return img


def tensor2single3(img):
    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))
    elif img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return img


def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image numpy array of BGR channel order.
    Input: 4D (B, 3/1, H, W), 3D (C, H, W) or 2D (H, W), any range, RGB channel order
    Output: 3D (H, W, C) or 2D (H, W), [0, 255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # CHW -> HWC, RGB -> BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # CHW -> HWC, RGB -> BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only 4D, 3D and 2D tensors are supported, but received a tensor with {:d} dimensions'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
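

# Conversion sketch (illustrative only): uint8 HxWxC image -> 4D tensor and back.
def _demo_tensor_roundtrip():
    img = np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8)
    t = uint2tensor4(img, data_range=1.)  # 1x3x16x16 float tensor in [0, 1]
    out = tensor2uint(t, data_range=1.)   # back to HxWx3 uint8
    assert np.array_equal(out, img)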


'''
# =======================================
# image augmentation on numpy image
# augment_img(img, mode=0)
# augment_imgs(img_list, hflip=True, rot=True)
# =======================================
'''


def augment_img(img, mode=0):
    '''8 augmentation modes: the identity plus combinations of
    90-degree rotations and flips (the dihedral group of the square)'''
    if mode == 0:
        return img
    elif mode == 1:
        return np.flipud(np.rot90(img))
    elif mode == 2:
        return np.flipud(img)
    elif mode == 3:
        return np.rot90(img, k=3)
    elif mode == 4:
        return np.flipud(np.rot90(img, k=2))
    elif mode == 5:
        return np.rot90(img)
    elif mode == 6:
        return np.rot90(img, k=2)
    elif mode == 7:
        return np.flipud(np.rot90(img, k=3))


def augment_img_np3(img, mode=0):
    if mode == 0:
        return img
    elif mode == 1:
        return img.transpose(1, 0, 2)
    elif mode == 2:
        return img[::-1, :, :]
    elif mode == 3:
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 4:
        return img[:, ::-1, :]
    elif mode == 5:
        img = img[:, ::-1, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 6:
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        return img
    elif mode == 7:
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img


def augment_img_tensor(img, mode=0):
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)
    return img_tensor.type_as(img)


def augment_imgs(img_list, hflip=True, rot=True):
    hflip = hflip and random.random() < 0.5
    vflip = rot and random.random() < 0.5
    rot90 = rot and random.random() < 0.5

    def _augment(img):
        if hflip:
            img = img[:, ::-1, :]
        if vflip:
            img = img[::-1, :, :]
        if rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_augment(img) for img in img_list]
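

# Paired-augmentation sketch (illustrative only): the random flips/rotation are
# drawn once per call, so every image in the list gets the same transform and
# LR/HR pairs stay aligned.
def _demo_paired_augment():
    lr = np.zeros((8, 8, 3), dtype=np.float32)
    hr = np.zeros((32, 32, 3), dtype=np.float32)
    lr_aug, hr_aug = augment_imgs([lr, hr], hflip=True, rot=True)
    assert lr_aug.shape == lr.shape and hr_aug.shape == hr.shape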


'''
# =======================================
# color conversion and cropping on numpy image
# channel_convert(in_c, tar_type, img_list)
# rgb2ycbcr(img, only_y=True)
# bgr2ycbcr(img, only_y=True)
# ycbcr2rgb(img)
# modcrop(img_in, scale)
# =======================================
'''


def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def modcrop(img_in, scale):
    # crop H and W down to the nearest multiple of scale
    img = np.copy(img_in)
    if img.ndim == 2:
        H, W = img.shape
        H_r, W_r = H % scale, W % scale
        img = img[:H - H_r, :W - W_r]
    elif img.ndim == 3:
        H, W, C = img.shape
        H_r, W_r = H % scale, W % scale
        img = img[:H - H_r, :W - W_r, :]
    else:
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    return img


def shave(img_in, border=0):
    # remove a border of `border` pixels on each side
    img = np.copy(img_in)
    h, w = img.shape[:2]
    img = img[border:h-border, border:w-border]
    return img


def channel_convert(in_c, tar_type, img_list):
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'y':  # BGR to Y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list


'''
# =======================================
# metric, PSNR and SSIM
# =======================================
'''


def calculate_psnr(img1, img2, border=0):
    '''calculate PSNR
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))


def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')


def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
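

# Metric usage sketch (illustrative only): both metrics expect images in [0, 255].
def _demo_metrics():
    img = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)
    noisy = np.clip(img + np.random.normal(0, 5, img.shape), 0, 255)
    print('PSNR: {:.2f} dB'.format(calculate_psnr(img, noisy)))
    print('SSIM: {:.4f}'.format(calculate_ssim(img, noisy)))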


'''
# =======================================
# pytorch version of matlab imresize
# =======================================
'''


def cubic(x):
    # Keys bicubic interpolation kernel (a = -0.5), matching matlab's imresize
    absx = torch.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
        (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))


def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    if (scale < 1) and (antialiasing):
        # Use a modified kernel (larger kernel width) to simultaneously
        # interpolate and antialias
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5 + scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices

    # apply the cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)

    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)

    # If the first or last column of weights is all zero, get rid of it.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)


def imresize(img, scale, antialiasing=True):
    # input: img: pytorch tensor, CHW or HW, [0, 1]
    # output: CHW or HW, [0, 1], without rounding
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # get weights and indices for both dimensions
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)

    # process H dimension: symmetric copying of the border rows
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # process W dimension: symmetric copying of the border columns
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2


def imresize_np(img, scale, antialiasing=True):
    # input: img: numpy array, HWC or HW, [0, 1]
    # output: HWC or HW, [0, 1], without rounding
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)

    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # get weights and indices for both dimensions
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)

    # process H dimension: symmetric copying of the border rows
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])

    # process W dimension: symmetric copying of the border columns
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()

    return out_2.numpy()


if __name__ == '__main__':
    img = imread_uint('test.bmp', 3)
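    # Illustrative continuation (not in the original file; assumes 'test.bmp'
    # exists next to this script): downscale by 1/2 with the matlab-style
    # resizer and write the result out. 'test_x0.5.png' is a placeholder name.
    img = uint2single(img)
    img_lr = imresize_np(img, 1/2)
    imsave(single2uint(img_lr), 'test_x0.5.png')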