repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/div2kjpeg.py
import os from data import srdata from data import div2k class DIV2KJPEG(div2k.DIV2K): def __init__(self, args, name='', train=True, benchmark=False): self.q_factor = int(name.replace('DIV2K-Q', '')) super(DIV2KJPEG, self).__init__( args, name=name, train=train, benchmark=benchmark ) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, 'DIV2K') self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join( self.apath, 'DIV2K_Q{}'.format(self.q_factor) ) if self.input_large: self.dir_lr += 'L' self.ext = ('.png', '.jpg')
675
31.190476
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/sr291.py
from data import srdata class SR291(srdata.SRData): def __init__(self, args, name='SR291', train=True, benchmark=False): super(SR291, self).__init__(args, name=name)
180
24.857143
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/benchmark.py
import os from data import common from data import srdata import numpy as np import torch import torch.utils.data as data class Benchmark(srdata.SRData): def __init__(self, args, name='', train=True, benchmark=True): super(Benchmark, self).__init__( args, name=name, train=train, benchmark=True ) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, 'benchmark', self.name) self.dir_hr = os.path.join(self.apath, 'HR') if self.input_large: self.dir_lr = os.path.join(self.apath, 'LR_bicubicL') else: self.dir_lr = os.path.join(self.apath, 'LR_bicubic') self.ext = ('', '.png')
703
26.076923
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/video.py
import os from data import common import cv2 import numpy as np import imageio import torch import torch.utils.data as data class Video(data.Dataset): def __init__(self, args, name='Video', train=False, benchmark=False): self.args = args self.name = name self.scale = args.scale self.idx_scale = 0 self.train = False self.do_eval = False self.benchmark = benchmark self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo)) self.vidcap = cv2.VideoCapture(args.dir_demo) self.n_frames = 0 self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) def __getitem__(self, idx): success, lr = self.vidcap.read() if success: self.n_frames += 1 lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames) else: vidcap.release() return None def __len__(self): return self.total_frames def set_scale(self, idx_scale): self.idx_scale = idx_scale
1,207
25.844444
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/srdata.py
import os import glob import random import pickle from data import common import numpy as np import imageio import torch import torch.utils.data as data class SRData(data.Dataset): def __init__(self, args, name='', train=True, benchmark=False): self.args = args self.name = name self.train = train self.split = 'train' if train else 'test' self.do_eval = True self.benchmark = benchmark self.input_large = (args.model == 'VDSR') self.scale = args.scale self.idx_scale = 0 self._set_filesystem(args.dir_data) if args.ext.find('img') < 0: path_bin = os.path.join(self.apath, 'bin') os.makedirs(path_bin, exist_ok=True) list_hr, list_lr = self._scan() if args.ext.find('img') >= 0 or benchmark: self.images_hr, self.images_lr = list_hr, list_lr elif args.ext.find('sep') >= 0: os.makedirs( self.dir_hr.replace(self.apath, path_bin), exist_ok=True ) for s in self.scale: os.makedirs( os.path.join( self.dir_lr.replace(self.apath, path_bin), 'X{}'.format(s) ), exist_ok=True ) self.images_hr, self.images_lr = [], [[] for _ in self.scale] for h in list_hr: b = h.replace(self.apath, path_bin) b = b.replace(self.ext[0], '.pt') self.images_hr.append(b) self._check_and_load(args.ext, h, b, verbose=True) for i, ll in enumerate(list_lr): for l in ll: b = l.replace(self.apath, path_bin) b = b.replace(self.ext[1], '.pt') self.images_lr[i].append(b) self._check_and_load(args.ext, l, b, verbose=True) if train: n_patches = args.batch_size * args.test_every n_images = len(args.data_train) * len(self.images_hr) if n_images == 0: self.repeat = 0 else: self.repeat = max(n_patches // n_images, 1) # Below functions as used to prepare images def _scan(self): names_hr = sorted( glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0])) ) names_lr = [[] for _ in self.scale] for f in names_hr: filename, _ = os.path.splitext(os.path.basename(f)) for si, s in enumerate(self.scale): names_lr[si].append(os.path.join( self.dir_lr, 'X{}/{}x{}{}'.format( s, filename, s, self.ext[1] ) )) return names_hr, names_lr def 
_set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, self.name) self.dir_hr = os.path.join(self.apath, 'HR') self.dir_lr = os.path.join(self.apath, 'LR_bicubic') if self.input_large: self.dir_lr += 'L' self.ext = ('.png', '.png') def _check_and_load(self, ext, img, f, verbose=True): if not os.path.isfile(f) or ext.find('reset') >= 0: if verbose: print('Making a binary: {}'.format(f)) with open(f, 'wb') as _f: pickle.dump(imageio.imread(img), _f) def __getitem__(self, idx): lr, hr, filename = self._load_file(idx) pair = self.get_patch(lr, hr) pair = common.set_channel(*pair, n_channels=self.args.n_colors) pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range) return pair_t[0], pair_t[1], filename def __len__(self): if self.train: return len(self.images_hr) * self.repeat else: return len(self.images_hr) def _get_index(self, idx): if self.train: return idx % len(self.images_hr) else: return idx def _load_file(self, idx): idx = self._get_index(idx) f_hr = self.images_hr[idx] f_lr = self.images_lr[self.idx_scale][idx] filename, _ = os.path.splitext(os.path.basename(f_hr)) if self.args.ext == 'img' or self.benchmark: hr = imageio.imread(f_hr) lr = imageio.imread(f_lr) elif self.args.ext.find('sep') >= 0: with open(f_hr, 'rb') as _f: hr = pickle.load(_f) with open(f_lr, 'rb') as _f: lr = pickle.load(_f) return lr, hr, filename def get_patch(self, lr, hr): scale = self.scale[self.idx_scale] if self.train: lr, hr = common.get_patch( lr, hr, patch_size=self.args.patch_size, scale=scale, multi=(len(self.scale) > 1), input_large=self.input_large ) if not self.args.no_augment: lr, hr = common.augment(lr, hr) else: ih, iw = lr.shape[:2] hr = hr[0:ih * scale, 0:iw * scale] return lr, hr def set_scale(self, idx_scale): if not self.input_large: self.idx_scale = idx_scale else: self.idx_scale = random.randint(0, len(self.scale) - 1)
5,343
32.822785
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/demo.py
import os from data import common import numpy as np import imageio import torch import torch.utils.data as data class Demo(data.Dataset): def __init__(self, args, name='Demo', train=False, benchmark=False): self.args = args self.name = name self.scale = args.scale self.idx_scale = 0 self.train = False self.benchmark = benchmark self.filelist = [] for f in os.listdir(args.dir_demo): if f.find('.png') >= 0 or f.find('.jp') >= 0: self.filelist.append(os.path.join(args.dir_demo, f)) self.filelist.sort() def __getitem__(self, idx): filename = os.path.splitext(os.path.basename(self.filelist[idx]))[0] lr = imageio.imread(self.filelist[idx]) lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) return lr_t, -1, filename def __len__(self): return len(self.filelist) def set_scale(self, idx_scale): self.idx_scale = idx_scale
1,075
25.9
76
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/common.py
import random import numpy as np import skimage.color as sc import torch def get_patch(*args, patch_size=96, scale=2, multi=False, input_large=False): ih, iw = args[0].shape[:2] if not input_large: p = scale if multi else 1 tp = p * patch_size ip = tp // scale else: tp = patch_size ip = patch_size ix = random.randrange(0, iw - ip + 1) iy = random.randrange(0, ih - ip + 1) if not input_large: tx, ty = scale * ix, scale * iy else: tx, ty = ix, iy ret = [ args[0][iy:iy + ip, ix:ix + ip, :], *[a[ty:ty + tp, tx:tx + tp, :] for a in args[1:]] ] return ret def set_channel(*args, n_channels=3): def _set_channel(img): if img.ndim == 2: img = np.expand_dims(img, axis=2) c = img.shape[2] if n_channels == 1 and c == 3: img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2) elif n_channels == 3 and c == 1: img = np.concatenate([img] * n_channels, 2) return img return [_set_channel(a) for a in args] def np2Tensor(*args, rgb_range=255): def _np2Tensor(img): np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1))) tensor = torch.from_numpy(np_transpose).float() tensor.mul_(rgb_range / 255) return tensor return [_np2Tensor(a) for a in args] def augment(*args, hflip=True, rot=True): hflip = hflip and random.random() < 0.5 vflip = rot and random.random() < 0.5 rot90 = rot and random.random() < 0.5 def _augment(img): if hflip: img = img[:, ::-1, :] if vflip: img = img[::-1, :, :] if rot90: img = img.transpose(1, 0, 2) return img return [_augment(a) for a in args]
1,786
23.479452
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/__init__.py
from importlib import import_module #from dataloader import MSDataLoader from torch.utils.data import dataloader from torch.utils.data import ConcatDataset # This is a simple wrapper function for ConcatDataset class MyConcatDataset(ConcatDataset): def __init__(self, datasets): super(MyConcatDataset, self).__init__(datasets) self.train = datasets[0].train def set_scale(self, idx_scale): for d in self.datasets: if hasattr(d, 'set_scale'): d.set_scale(idx_scale) class Data: def __init__(self, args): self.loader_train = None if not args.test_only: datasets = [] for d in args.data_train: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) datasets.append(getattr(m, module_name)(args, name=d)) self.loader_train = dataloader.DataLoader( MyConcatDataset(datasets), batch_size=args.batch_size, shuffle=True, pin_memory=not args.cpu, num_workers=args.n_threads, ) self.loader_test = [] for d in args.data_test: if d in ['Set5', 'Set14', 'B100', 'Urban100']: m = import_module('data.benchmark') testset = getattr(m, 'Benchmark')(args, train=False, name=d) else: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) testset = getattr(m, module_name)(args, train=False, name=d) self.loader_test.append( dataloader.DataLoader( testset, batch_size=1, shuffle=False, pin_memory=not args.cpu, num_workers=args.n_threads, ) )
1,949
35.792453
76
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/data/div2k.py
import os from data import srdata class DIV2K(srdata.SRData): def __init__(self, args, name='DIV2K', train=True, benchmark=False): data_range = [r.split('-') for r in args.data_range.split('/')] if train: data_range = data_range[0] else: if args.test_only and len(data_range) == 1: data_range = data_range[0] else: data_range = data_range[1] self.begin, self.end = list(map(lambda x: int(x), data_range)) super(DIV2K, self).__init__( args, name=name, train=train, benchmark=benchmark ) def _scan(self): names_hr, names_lr = super(DIV2K, self)._scan() names_hr = names_hr[self.begin - 1:self.end] names_lr = [n[self.begin - 1:self.end] for n in names_lr] return names_hr, names_lr def _set_filesystem(self, dir_data): super(DIV2K, self)._set_filesystem(dir_data) self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic') if self.input_large: self.dir_lr += 'L'
1,134
33.393939
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/rcan.py
## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks ## https://arxiv.org/abs/1807.02758 from model import common from model.attention import ContextualAttention import torch.nn as nn import torch def make_model(args, parent=False): return RCAN(args) ## Channel Attention (CA) Layer class CALayer(nn.Module): def __init__(self, channel, reduction=16): super(CALayer, self).__init__() # global average pooling: feature --> point self.avg_pool = nn.AdaptiveAvgPool2d(1) # feature channel downscale and upscale --> channel weight #self.a = torch.nn.Parameter(torch.Tensor([0])) #self.a.requires_grad=True self.conv_du = nn.Sequential( nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True), nn.ReLU(inplace=True), nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True), nn.Sigmoid() ) def forward(self, x): y = self.avg_pool(x) y = self.conv_du(y) return x * y ## Residual Channel Attention Block (RCAB) class RCAB(nn.Module): def __init__( self, conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(RCAB, self).__init__() modules_body = [] for i in range(2): modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias)) if bn: modules_body.append(nn.BatchNorm2d(n_feat)) if i == 0: modules_body.append(act) modules_body.append(CALayer(n_feat, reduction)) self.body = nn.Sequential(*modules_body) self.res_scale = res_scale def forward(self, x): res = self.body(x) #res = self.body(x).mul(self.res_scale) res += x return res ## Residual Group (RG) class ResidualGroup(nn.Module): def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks): super(ResidualGroup, self).__init__() modules_body = [] modules_body = [ RCAB( conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \ for _ in range(n_resblocks)] modules_body.append(conv(n_feat, n_feat, kernel_size)) self.body = nn.Sequential(*modules_body) def forward(self, x): res = 
self.body(x) res += x return res ## Residual Channel Attention Network (RCAN) class RCAN(nn.Module): def __init__(self, args, conv=common.default_conv): super(RCAN, self).__init__() self.a = nn.Parameter(torch.Tensor([0])) self.a.requires_grad=True n_resgroups = args.n_resgroups n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 reduction = args.reduction scale = args.scale[0] act = nn.ReLU(True) # RGB mean for DIV2K rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) # define head module modules_head = [conv(args.n_colors, n_feats, kernel_size)] self.msa = ContextualAttention() # define body module modules_body = [ ResidualGroup( conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \ for _ in range(5)] modules_body.append(self.msa) for i in range(5): modules_body.append(ResidualGroup(conv,n_feats,kernel_size,reduction,act=act,res_scale=args.res_scale,n_resblocks=n_resblocks)) modules_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module modules_tail = [ common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*modules_head) self.body = nn.Sequential(*modules_body) self.tail = nn.Sequential(*modules_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=False): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('msa') or name.find('a') >= 0: print('Replace pre-trained upsampler to new one...') else: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model 
are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('msa') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name)) if strict: missing = set(own_state.keys()) - set(state_dict.keys()) if len(missing) > 0: raise KeyError('missing keys in state_dict: "{}"'.format(missing))
5,662
36.256579
139
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/ddbpn.py
# Deep Back-Projection Networks For Super-Resolution # https://arxiv.org/abs/1803.02735 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return DDBPN(args) def projection_conv(in_channels, out_channels, scale, up=True): kernel_size, stride, padding = { 2: (6, 2, 2), 4: (8, 4, 2), 8: (12, 8, 2) }[scale] if up: conv_f = nn.ConvTranspose2d else: conv_f = nn.Conv2d return conv_f( in_channels, out_channels, kernel_size, stride=stride, padding=padding ) class DenseProjection(nn.Module): def __init__(self, in_channels, nr, scale, up=True, bottleneck=True): super(DenseProjection, self).__init__() if bottleneck: self.bottleneck = nn.Sequential(*[ nn.Conv2d(in_channels, nr, 1), nn.PReLU(nr) ]) inter_channels = nr else: self.bottleneck = None inter_channels = in_channels self.conv_1 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) self.conv_2 = nn.Sequential(*[ projection_conv(nr, inter_channels, scale, not up), nn.PReLU(inter_channels) ]) self.conv_3 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) def forward(self, x): if self.bottleneck is not None: x = self.bottleneck(x) a_0 = self.conv_1(x) b_0 = self.conv_2(a_0) e = b_0.sub(x) a_1 = self.conv_3(e) out = a_0.add(a_1) return out class DDBPN(nn.Module): def __init__(self, args): super(DDBPN, self).__init__() scale = args.scale[0] n0 = 128 nr = 32 self.depth = 6 rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) initial = [ nn.Conv2d(args.n_colors, n0, 3, padding=1), nn.PReLU(n0), nn.Conv2d(n0, nr, 1), nn.PReLU(nr) ] self.initial = nn.Sequential(*initial) self.upmodules = nn.ModuleList() self.downmodules = nn.ModuleList() channels = nr for i in range(self.depth): self.upmodules.append( DenseProjection(channels, nr, scale, True, i > 1) ) if i != 0: channels += nr channels = nr for i in range(self.depth - 1): self.downmodules.append( 
DenseProjection(channels, nr, scale, False, i != 0) ) channels += nr reconstruction = [ nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1) ] self.reconstruction = nn.Sequential(*reconstruction) self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) def forward(self, x): x = self.sub_mean(x) x = self.initial(x) h_list = [] l_list = [] for i in range(self.depth - 1): if i == 0: l = x else: l = torch.cat(l_list, dim=1) h_list.append(self.upmodules[i](l)) l_list.append(self.downmodules[i](torch.cat(h_list, dim=1))) h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1))) out = self.reconstruction(torch.cat(h_list, dim=1)) out = self.add_mean(out) return out
3,629
26.5
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/rdn.py
# Residual Dense Network for Image Super-Resolution # https://arxiv.org/abs/1802.08797 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return RDN(args) class RDB_Conv(nn.Module): def __init__(self, inChannels, growRate, kSize=3): super(RDB_Conv, self).__init__() Cin = inChannels G = growRate self.conv = nn.Sequential(*[ nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1), nn.ReLU() ]) def forward(self, x): out = self.conv(x) return torch.cat((x, out), 1) class RDB(nn.Module): def __init__(self, growRate0, growRate, nConvLayers, kSize=3): super(RDB, self).__init__() G0 = growRate0 G = growRate C = nConvLayers convs = [] for c in range(C): convs.append(RDB_Conv(G0 + c*G, G)) self.convs = nn.Sequential(*convs) # Local Feature Fusion self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1) def forward(self, x): return self.LFF(self.convs(x)) + x class RDN(nn.Module): def __init__(self, args): super(RDN, self).__init__() r = args.scale[0] G0 = args.G0 kSize = args.RDNkSize # number of RDB blocks, conv layers, out channels self.D, C, G = { 'A': (20, 6, 32), 'B': (16, 8, 64), }[args.RDNconfig] # Shallow feature extraction net self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1) self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) # Redidual dense blocks and dense feature fusion self.RDBs = nn.ModuleList() for i in range(self.D): self.RDBs.append( RDB(growRate0 = G0, growRate = G, nConvLayers = C) ) # Global Feature Fusion self.GFF = nn.Sequential(*[ nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1), nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) ]) # Up-sampling net if r == 2 or r == 3: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(r), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) elif r == 4: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, 
stride=1), nn.PixelShuffle(2), nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(2), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) else: raise ValueError("scale must be 2 or 3 or 4.") def forward(self, x): f__1 = self.SFENet1(x) x = self.SFENet2(f__1) RDBs_out = [] for i in range(self.D): x = self.RDBs[i](x) RDBs_out.append(x) x = self.GFF(torch.cat(RDBs_out,1)) x += f__1 return self.UPNet(x)
3,202
29.216981
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/mdsr.py
from model import common import torch.nn as nn def make_model(args, parent=False): return MDSR(args) class MDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(MDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.scale_idx = 0 act = nn.ReLU(True) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) m_head = [conv(args.n_colors, n_feats, kernel_size)] self.pre_process = nn.ModuleList([ nn.Sequential( common.ResBlock(conv, n_feats, 5, act=act), common.ResBlock(conv, n_feats, 5, act=act) ) for _ in args.scale ]) m_body = [ common.ResBlock( conv, n_feats, kernel_size, act=act ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) self.upsample = nn.ModuleList([ common.Upsampler( conv, s, n_feats, act=False ) for s in args.scale ]) m_tail = [conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) x = self.pre_process[self.scale_idx](x) res = self.body(x) res += x x = self.upsample[self.scale_idx](res) x = self.tail(x) x = self.add_mean(x) return x def set_scale(self, scale_idx): self.scale_idx = scale_idx
1,837
25.637681
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/common.py
import math import torch import torch.nn as nn import torch.nn.functional as F def default_conv(in_channels, out_channels, kernel_size,stride=1, bias=True): return nn.Conv2d( in_channels, out_channels, kernel_size, padding=(kernel_size//2),stride=stride, bias=bias) class MeanShift(nn.Conv2d): def __init__( self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1): super(MeanShift, self).__init__(3, 3, kernel_size=1) std = torch.Tensor(rgb_std) self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1) self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std for p in self.parameters(): p.requires_grad = False class BasicBlock(nn.Sequential): def __init__( self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True, bn=False, act=nn.PReLU()): m = [conv(in_channels, out_channels, kernel_size, bias=bias)] if bn: m.append(nn.BatchNorm2d(out_channels)) if act is not None: m.append(act) super(BasicBlock, self).__init__(*m) class ResBlock(nn.Module): def __init__( self, conv, n_feats, kernel_size, bias=True, bn=False, act=nn.PReLU(), res_scale=1): super(ResBlock, self).__init__() m = [] for i in range(2): m.append(conv(n_feats, n_feats, kernel_size, bias=bias)) if bn: m.append(nn.BatchNorm2d(n_feats)) if i == 0: m.append(act) self.body = nn.Sequential(*m) self.res_scale = res_scale def forward(self, x): res = self.body(x).mul(self.res_scale) res += x return res class Upsampler(nn.Sequential): def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True): m = [] if (scale & (scale - 1)) == 0: # Is scale = 2^n? 
for _ in range(int(math.log(scale, 2))): m.append(conv(n_feats, 4 * n_feats, 3, bias)) m.append(nn.PixelShuffle(2)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) elif scale == 3: m.append(conv(n_feats, 9 * n_feats, 3, bias)) m.append(nn.PixelShuffle(3)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) else: raise NotImplementedError super(Upsampler, self).__init__(*m)
2,799
30.460674
80
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/__init__.py
import os from importlib import import_module import torch import torch.nn as nn from torch.autograd import Variable class Model(nn.Module): def __init__(self, args, ckp): super(Model, self).__init__() print('Making model...') self.scale = args.scale self.idx_scale = 0 self.self_ensemble = args.self_ensemble self.chop = args.chop self.precision = args.precision self.cpu = args.cpu self.device = torch.device('cpu' if args.cpu else 'cuda') self.n_GPUs = args.n_GPUs self.save_models = args.save_models module = import_module('model.' + args.model.lower()) self.model = module.make_model(args).to(self.device) if args.precision == 'half': self.model.half() if not args.cpu and args.n_GPUs > 1: self.model = nn.DataParallel(self.model, range(args.n_GPUs)) self.load( ckp.dir, pre_train=args.pre_train, resume=args.resume, cpu=args.cpu ) print(self.model, file=ckp.log_file) def forward(self, x, idx_scale): self.idx_scale = idx_scale target = self.get_model() if hasattr(target, 'set_scale'): target.set_scale(idx_scale) if self.self_ensemble and not self.training: if self.chop: forward_function = self.forward_chop else: forward_function = self.model.forward return self.forward_x8(x, forward_function) elif self.chop and not self.training: return self.forward_chop(x) else: return self.model(x) def get_model(self): if self.n_GPUs == 1: return self.model else: return self.model.module def state_dict(self, **kwargs): target = self.get_model() return target.state_dict(**kwargs) def save(self, apath, epoch, is_best=False): target = self.get_model() torch.save( target.state_dict(), os.path.join(apath, 'model_latest.pt') ) if is_best: torch.save( target.state_dict(), os.path.join(apath, 'model_best.pt') ) if self.save_models: torch.save( target.state_dict(), os.path.join(apath, 'model_{}.pt'.format(epoch)) ) def load(self, apath, pre_train='.', resume=-1, cpu=False): if cpu: kwargs = {'map_location': lambda storage, loc: storage} else: kwargs = {} if resume == -1: 
self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model_latest.pt'), **kwargs ), strict=False ) elif resume == 0: if pre_train != '.': print('Loading model from {}'.format(pre_train)) self.get_model().load_state_dict( torch.load(pre_train, **kwargs), strict=False ) else: self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model', 'model_{}.pt'.format(resume)), **kwargs ), strict=False ) def forward_chop(self, x, shave=10, min_size=6400): scale = self.scale[self.idx_scale] n_GPUs = min(self.n_GPUs, 4) b, c, h, w = x.size() h_half, w_half = h // 2, w // 2 h_size, w_size = h_half + shave, w_half + shave h_size += h_size%scale w_size +=w_size%scale lr_list = [ x[:, :, 0:h_size, 0:w_size], x[:, :, 0:h_size, (w - w_size):w], x[:, :, (h - h_size):h, 0:w_size], x[:, :, (h - h_size):h, (w - w_size):w]] if w_size * h_size < min_size: sr_list = [] for i in range(0, 4, n_GPUs): lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0) sr_batch = self.model(lr_batch) sr_list.extend(sr_batch.chunk(n_GPUs, dim=0)) else: sr_list = [ self.forward_chop(patch, shave=shave, min_size=min_size) \ for patch in lr_list ] h, w = scale * h, scale * w h_half, w_half = scale * h_half, scale * w_half h_size, w_size = scale * h_size, scale * w_size shave *= scale output = x.new(b, c, h, w) output[:, :, 0:h_half, 0:w_half] \ = sr_list[0][:, :, 0:h_half, 0:w_half] output[:, :, 0:h_half, w_half:w] \ = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size] output[:, :, h_half:h, 0:w_half] \ = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half] output[:, :, h_half:h, w_half:w] \ = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size] return output def forward_x8(self, x, forward_function): def _transform(v, op): if self.precision != 'single': v = v.float() v2np = v.data.cpu().numpy() if op == 'v': tfnp = v2np[:, :, :, ::-1].copy() elif op == 'h': tfnp = v2np[:, :, ::-1, :].copy() elif op == 't': tfnp = v2np.transpose((0, 1, 3, 2)).copy() ret = 
torch.Tensor(tfnp).to(self.device) if self.precision == 'half': ret = ret.half() return ret lr_list = [x] for tf in 'v', 'h', 't': lr_list.extend([_transform(t, tf) for t in lr_list]) sr_list = [forward_function(aug) for aug in lr_list] for i in range(len(sr_list)): if i > 3: sr_list[i] = _transform(sr_list[i], 't') if i % 4 > 1: sr_list[i] = _transform(sr_list[i], 'h') if (i % 4) % 2 == 1: sr_list[i] = _transform(sr_list[i], 'v') output_cat = torch.cat(sr_list, dim=0) output = output_cat.mean(dim=0, keepdim=True) return output
6,243
31.520833
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/mssr.py
from model import common import torch.nn as nn import torch from model.attention import ContextualAttention,NonLocalAttention def make_model(args, parent=False): return MSSR(args) class MultisourceProjection(nn.Module): def __init__(self, in_channel,kernel_size = 3, conv=common.default_conv): super(MultisourceProjection, self).__init__() self.up_attention = ContextualAttention(scale=2) self.down_attention = NonLocalAttention() self.upsample = nn.Sequential(*[nn.ConvTranspose2d(in_channel,in_channel,6,stride=2,padding=2),nn.PReLU()]) self.encoder = common.ResBlock(conv, in_channel, kernel_size, act=nn.PReLU(), res_scale=1) def forward(self,x): down_map = self.upsample(self.down_attention(x)) up_map = self.up_attention(x) err = self.encoder(up_map-down_map) final_map = down_map + err return final_map class RecurrentProjection(nn.Module): def __init__(self, in_channel,kernel_size = 3, conv=common.default_conv): super(RecurrentProjection, self).__init__() self.multi_source_projection_1 = MultisourceProjection(in_channel,kernel_size=kernel_size,conv=conv) self.multi_source_projection_2 = MultisourceProjection(in_channel,kernel_size=kernel_size,conv=conv) self.down_sample_1 = nn.Sequential(*[nn.Conv2d(in_channel,in_channel,6,stride=2,padding=2),nn.PReLU()]) #self.down_sample_2 = nn.Sequential(*[nn.Conv2d(in_channel,in_channel,6,stride=2,padding=2),nn.PReLU()]) self.down_sample_3 = nn.Sequential(*[nn.Conv2d(in_channel,in_channel,8,stride=4,padding=2),nn.PReLU()]) self.down_sample_4 = nn.Sequential(*[nn.Conv2d(in_channel,in_channel,8,stride=4,padding=2),nn.PReLU()]) self.error_encode_1 = nn.Sequential(*[nn.ConvTranspose2d(in_channel,in_channel,6,stride=2,padding=2),nn.PReLU()]) self.error_encode_2 = nn.Sequential(*[nn.ConvTranspose2d(in_channel,in_channel,8,stride=4,padding=2),nn.PReLU()]) self.post_conv = common.BasicBlock(conv,in_channel,in_channel,kernel_size,stride=1,bias=True,act=nn.PReLU()) def forward(self, x): x_up = self.multi_source_projection_1(x) x_down = 
self.down_sample_1(x_up) error_up = self.error_encode_1(x-x_down) h_estimate_1 = x_up + error_up x_up_2 = self.multi_source_projection_2(h_estimate_1) x_down_2 = self.down_sample_3(x_up_2) error_up_2 = self.error_encode_2(x-x_down_2) h_estimate_2 = x_up_2 + error_up_2 x_final = self.post_conv(self.down_sample_4(h_estimate_2)) return x_final, h_estimate_2 class MSSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(MSSR, self).__init__() #n_convblock = args.n_convblocks n_feats = args.n_feats self.depth = args.depth kernel_size = 3 scale = args.scale[0] rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) # define head module m_head = [common.BasicBlock(conv, args.n_colors, n_feats, kernel_size,stride=1,bias=True,bn=False,act=nn.PReLU()), common.BasicBlock(conv,n_feats, n_feats, kernel_size,stride=1,bias=True,bn=False,act=nn.PReLU())] # define multiple reconstruction module self.body = RecurrentProjection(n_feats) # define tail module m_tail = [ nn.Conv2d( n_feats*self.depth, args.n_colors, kernel_size, padding=(kernel_size//2) ) ] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.tail = nn.Sequential(*m_tail) def forward(self,input): x = self.sub_mean(input) x = self.head(x) bag = [] for i in range(self.depth): x, h_estimate = self.body(x) bag.append(h_estimate) h_feature = torch.cat(bag,dim=1) h_final = self.tail(h_feature) return self.add_mean(h_final)
4,174
38.761905
122
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/edsr.py
from model import common from model import attention import torch.nn as nn def make_model(args, parent=False): if args.dilation: from model import dilated return PAEDSR(args, dilated.dilated_conv) else: return PAEDSR(args) class PAEDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(PAEDSR, self).__init__() n_resblock = args.n_resblocks n_feats = args.n_feats kernel_size = 3 scale = args.scale[0] act = nn.ReLU(True) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) self.msa = attention.PyramidAttention(channel=256, reduction=8,res_scale=args.res_scale); # define head module m_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, act=act, res_scale=args.res_scale ) for _ in range(n_resblock//2) ] m_body.append(self.msa) for _ in range(n_resblock//2): m_body.append( common.ResBlock( conv, n_feats, kernel_size, act=act, res_scale=args.res_scale )) m_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act=False), nn.Conv2d( n_feats, args.n_colors, kernel_size, padding=(kernel_size//2) ) ] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' 
.format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name))
2,989
32.977273
106
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/attention.py
import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms from torchvision import utils as vutils from model import common from utils.tools import extract_image_patches,\ reduce_mean, reduce_sum, same_padding class PyramidAttention(nn.Module): def __init__(self, level=5, res_scale=1, channel=64, reduction=2, ksize=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv): super(PyramidAttention, self).__init__() self.ksize = ksize self.stride = stride self.res_scale = res_scale self.softmax_scale = softmax_scale self.scale = [1-i/10 for i in range(level)] self.average = average escape_NaN = torch.FloatTensor([1e-4]) self.register_buffer('escape_NaN', escape_NaN) self.conv_match_L_base = common.BasicBlock(conv,channel,channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_match = common.BasicBlock(conv,channel, channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_assembly = common.BasicBlock(conv,channel, channel,1,bn=False, act=nn.PReLU()) def forward(self, input): res = input #theta match_base = self.conv_match_L_base(input) shape_base = list(res.size()) input_groups = torch.split(match_base,1,dim=0) # patch size for matching kernel = self.ksize # raw_w is for reconstruction raw_w = [] # w is for matching w = [] #build feature pyramid for i in range(len(self.scale)): ref = input if self.scale[i]!=1: ref = F.interpolate(input, scale_factor=self.scale[i], mode='bicubic') #feature transformation function f base = self.conv_assembly(ref) shape_input = base.shape #sampling raw_w_i = extract_image_patches(base, ksizes=[kernel, kernel], strides=[self.stride,self.stride], rates=[1, 1], padding='same') # [N, C*k*k, L] raw_w_i = raw_w_i.view(shape_input[0], shape_input[1], kernel, kernel, -1) raw_w_i = raw_w_i.permute(0, 4, 1, 2, 3) # raw_shape: [N, L, C, k, k] raw_w_i_groups = torch.split(raw_w_i, 1, dim=0) raw_w.append(raw_w_i_groups) #feature transformation function g ref_i = self.conv_match(ref) 
shape_ref = ref_i.shape #sampling w_i = extract_image_patches(ref_i, ksizes=[self.ksize, self.ksize], strides=[self.stride, self.stride], rates=[1, 1], padding='same') w_i = w_i.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1) w_i = w_i.permute(0, 4, 1, 2, 3) # w shape: [N, L, C, k, k] w_i_groups = torch.split(w_i, 1, dim=0) w.append(w_i_groups) y = [] for idx, xi in enumerate(input_groups): #group in a filter wi = torch.cat([w[i][idx][0] for i in range(len(self.scale))],dim=0) # [L, C, k, k] #normalize max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2), axis=[1, 2, 3], keepdim=True)), self.escape_NaN) wi_normed = wi/ max_wi #matching xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1]) # xi: 1*c*H*W yi = F.conv2d(xi, wi_normed, stride=1) # [1, L, H, W] L = shape_ref[2]*shape_ref[3] yi = yi.view(1,wi.shape[0], shape_base[2], shape_base[3]) # (B=1, C=32*32, H=32, W=32) # softmax matching score yi = F.softmax(yi*self.softmax_scale, dim=1) if self.average == False: yi = (yi == yi.max(dim=1,keepdim=True)[0]).float() # deconv for patch pasting raw_wi = torch.cat([raw_w[i][idx][0] for i in range(len(self.scale))],dim=0) yi = F.conv_transpose2d(yi, raw_wi, stride=self.stride,padding=1)/4. y.append(yi) y = torch.cat(y, dim=0)+res*self.res_scale # back to the mini-batch return y
4,427
46.106383
147
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/vdsr.py
from model import common import torch.nn as nn import torch.nn.init as init url = { 'r20f64': '' } def make_model(args, parent=False): return VDSR(args) class VDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(VDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.url = url['r{}f{}'.format(n_resblocks, n_feats)] self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) def basic_block(in_channels, out_channels, act): return common.BasicBlock( conv, in_channels, out_channels, kernel_size, bias=True, bn=False, act=act ) # define body module m_body = [] m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True))) for _ in range(n_resblocks - 2): m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True))) m_body.append(basic_block(n_feats, args.n_colors, None)) self.body = nn.Sequential(*m_body) def forward(self, x): x = self.sub_mean(x) res = self.body(x) res += x x = self.add_mean(res) return x
1,275
26.148936
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/paedsr.py
from model import common from model import attention import torch.nn as nn def make_model(args, parent=False): if args.dilation: from model import dilated return PAEDSR(args, dilated.dilated_conv) else: return PAEDSR(args) class PAEDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(PAEDSR, self).__init__() n_resblock = args.n_resblocks n_feats = args.n_feats kernel_size = 3 scale = args.scale[0] act = nn.ReLU(True) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) self.msa = attention.PyramidAttention(channel=256, reduction=8,res_scale=args.res_scale); # define head module m_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, act=act, res_scale=args.res_scale ) for _ in range(n_resblock//2) ] m_body.append(self.msa) for _ in range(n_resblock//2): m_body.append( common.ResBlock( conv, n_feats, kernel_size, act=act, res_scale=args.res_scale )) m_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module m_tail = [ common.Upsampler(conv, scale, n_feats, act=False), nn.Conv2d( n_feats, args.n_colors, kernel_size, padding=(kernel_size//2) ) ] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' 
.format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name))
2,989
32.977273
106
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/utils/tools.py
import os import torch import numpy as np from PIL import Image import torch.nn.functional as F def normalize(x): return x.mul_(2).add_(-1) def same_padding(images, ksizes, strides, rates): assert len(images.size()) == 4 batch_size, channel, rows, cols = images.size() out_rows = (rows + strides[0] - 1) // strides[0] out_cols = (cols + strides[1] - 1) // strides[1] effective_k_row = (ksizes[0] - 1) * rates[0] + 1 effective_k_col = (ksizes[1] - 1) * rates[1] + 1 padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows) padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols) # Pad the input padding_top = int(padding_rows / 2.) padding_left = int(padding_cols / 2.) padding_bottom = padding_rows - padding_top padding_right = padding_cols - padding_left paddings = (padding_left, padding_right, padding_top, padding_bottom) images = torch.nn.ZeroPad2d(paddings)(images) return images def extract_image_patches(images, ksizes, strides, rates, padding='same'): """ Extract patches from images and put them in the C output dimension. :param padding: :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape :param ksizes: [ksize_rows, ksize_cols]. 
The size of the sliding window for each dimension of images :param strides: [stride_rows, stride_cols] :param rates: [dilation_rows, dilation_cols] :return: A Tensor """ assert len(images.size()) == 4 assert padding in ['same', 'valid'] batch_size, channel, height, width = images.size() if padding == 'same': images = same_padding(images, ksizes, strides, rates) elif padding == 'valid': pass else: raise NotImplementedError('Unsupported padding type: {}.\ Only "same" or "valid" are supported.'.format(padding)) unfold = torch.nn.Unfold(kernel_size=ksizes, dilation=rates, padding=0, stride=strides) patches = unfold(images) return patches # [N, C*k*k, L], L is the total number of such blocks def reduce_mean(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.mean(x, dim=i, keepdim=keepdim) return x def reduce_std(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.std(x, dim=i, keepdim=keepdim) return x def reduce_sum(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.sum(x, dim=i, keepdim=keepdim) return x
2,777
32.878049
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/SR/code/model/utils/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/main.py
import torch import utility import data import model import loss from option import args from trainer import Trainer torch.manual_seed(args.seed) checkpoint = utility.checkpoint(args) def main(): global model if args.data_test == ['video']: from videotester import VideoTester model = model.Model(args,checkpoint) print('total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0)) t = VideoTester(args, model, checkpoint) t.test() else: if checkpoint.ok: loader = data.Data(args) _model = model.Model(args, checkpoint) #print('total params:%.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0)) _loss = loss.Loss(args, checkpoint) if not args.test_only else None t = Trainer(args, loader, _model, _loss, checkpoint) while not t.terminate(): t.train() t.test() checkpoint.done() if __name__ == '__main__': main()
1,026
27.527778
97
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/utility.py
import os import math import time import datetime from multiprocessing import Process from multiprocessing import Queue import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import imageio import torch import torch.optim as optim import torch.optim.lr_scheduler as lrs class timer(): def __init__(self): self.acc = 0 self.tic() def tic(self): self.t0 = time.time() def toc(self, restart=False): diff = time.time() - self.t0 if restart: self.t0 = time.time() return diff def hold(self): self.acc += self.toc() def release(self): ret = self.acc self.acc = 0 return ret def reset(self): self.acc = 0 class checkpoint(): def __init__(self, args): self.args = args self.ok = True self.log = torch.Tensor() now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S') if not args.load: if not args.save: args.save = now self.dir = os.path.join('..', 'experiment', args.save) else: self.dir = os.path.join('..', 'experiment', args.load) if os.path.exists(self.dir): self.log = torch.load(self.get_path('psnr_log.pt')) print('Continue from epoch {}...'.format(len(self.log))) else: args.load = '' if args.reset: os.system('rm -rf ' + self.dir) args.load = '' os.makedirs(self.dir, exist_ok=True) os.makedirs(self.get_path('model'), exist_ok=True) for d in args.data_test: os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True) open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w' self.log_file = open(self.get_path('log.txt'), open_type) with open(self.get_path('config.txt'), open_type) as f: f.write(now + '\n\n') for arg in vars(args): f.write('{}: {}\n'.format(arg, getattr(args, arg))) f.write('\n') self.n_processes = 8 def get_path(self, *subdir): return os.path.join(self.dir, *subdir) def save(self, trainer, epoch, is_best=False): trainer.model.save(self.get_path('model'), epoch, is_best=is_best) trainer.loss.save(self.dir) trainer.loss.plot_loss(self.dir, epoch) self.plot_psnr(epoch) trainer.optimizer.save(self.dir) 
torch.save(self.log, self.get_path('psnr_log.pt')) def add_log(self, log): self.log = torch.cat([self.log, log]) def write_log(self, log, refresh=False): print(log) self.log_file.write(log + '\n') if refresh: self.log_file.close() self.log_file = open(self.get_path('log.txt'), 'a') def done(self): self.log_file.close() def plot_psnr(self, epoch): axis = np.linspace(1, epoch, epoch) for idx_data, d in enumerate(self.args.data_test): label = 'SR on {}'.format(d) fig = plt.figure() plt.title(label) for idx_scale, scale in enumerate(self.args.scale): plt.plot( axis, self.log[:, idx_data, idx_scale].numpy(), label='Scale {}'.format(scale) ) plt.legend() plt.xlabel('Epochs') plt.ylabel('PSNR') plt.grid(True) plt.savefig(self.get_path('test_{}.pdf'.format(d))) plt.close(fig) def begin_background(self): self.queue = Queue() def bg_target(queue): while True: if not queue.empty(): filename, tensor = queue.get() if filename is None: break imageio.imwrite(filename, tensor.numpy()) self.process = [ Process(target=bg_target, args=(self.queue,)) \ for _ in range(self.n_processes) ] for p in self.process: p.start() def end_background(self): for _ in range(self.n_processes): self.queue.put((None, None)) while not self.queue.empty(): time.sleep(1) for p in self.process: p.join() def save_results(self, dataset, filename, save_list, scale): if self.args.save_results: filename = self.get_path( 'results-{}'.format(dataset.dataset.name), '{}_x{}_'.format(filename, scale) ) postfix = ('DN', 'LQ', 'HQ') for v, p in zip(save_list, postfix): normalized = v[0].mul(255 / self.args.rgb_range) tensor_cpu = normalized.byte().permute(1, 2, 0).cpu() self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu)) def quantize(img, rgb_range): pixel_range = 255 / rgb_range return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range) def calc_psnr(sr, hr, scale, rgb_range, dataset=None): if hr.nelement() == 1: return 0 diff = (sr - hr) / rgb_range if dataset and dataset.dataset.benchmark: shave = 
scale if diff.size(1) > 5: gray_coeffs = [65.738, 129.057, 25.064] convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256 diff = diff.mul(convert).sum(dim=1) else: shave = scale + 6 valid = diff[..., :, :] mse = valid.pow(2).mean() return -10 * math.log10(mse) def make_optimizer(args, target): ''' make optimizer and scheduler together ''' # optimizer trainable = filter(lambda x: x.requires_grad, target.parameters()) kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay} if args.optimizer == 'SGD': optimizer_class = optim.SGD kwargs_optimizer['momentum'] = args.momentum elif args.optimizer == 'ADAM': optimizer_class = optim.Adam kwargs_optimizer['betas'] = args.betas kwargs_optimizer['eps'] = args.epsilon elif args.optimizer == 'RMSprop': optimizer_class = optim.RMSprop kwargs_optimizer['eps'] = args.epsilon # scheduler milestones = list(map(lambda x: int(x), args.decay.split('-'))) kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma} scheduler_class = lrs.MultiStepLR class CustomOptimizer(optimizer_class): def __init__(self, *args, **kwargs): super(CustomOptimizer, self).__init__(*args, **kwargs) def _register_scheduler(self, scheduler_class, **kwargs): self.scheduler = scheduler_class(self, **kwargs) def save(self, save_dir): torch.save(self.state_dict(), self.get_dir(save_dir)) def load(self, load_dir, epoch=1): self.load_state_dict(torch.load(self.get_dir(load_dir))) if epoch > 1: for _ in range(epoch): self.scheduler.step() def get_dir(self, dir_path): return os.path.join(dir_path, 'optimizer.pt') def schedule(self): self.scheduler.step() def get_lr(self): return self.scheduler.get_lr()[0] def get_last_epoch(self): return self.scheduler.last_epoch optimizer = CustomOptimizer(trainable, **kwargs_optimizer) optimizer._register_scheduler(scheduler_class, **kwargs_scheduler) return optimizer
7,458
30.340336
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/dataloader.py
import threading import random import torch import torch.multiprocessing as multiprocessing from torch.utils.data import DataLoader from torch.utils.data import SequentialSampler from torch.utils.data import RandomSampler from torch.utils.data import BatchSampler from torch.utils.data import _utils from torch.utils.data.dataloader import _DataLoaderIter from torch.utils.data._utils import collate from torch.utils.data._utils import signal_handling from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL from torch.utils.data._utils import ExceptionWrapper from torch.utils.data._utils import IS_WINDOWS from torch.utils.data._utils.worker import ManagerWatchdog from torch._six import queue def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id): try: collate._use_shared_memory = True signal_handling._set_worker_signal_handlers() torch.set_num_threads(1) random.seed(seed) torch.manual_seed(seed) data_queue.cancel_join_thread() if init_fn is not None: init_fn(worker_id) watchdog = ManagerWatchdog() while watchdog.is_alive(): try: r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) except queue.Empty: continue if r is None: assert done_event.is_set() return elif done_event.is_set(): continue idx, batch_indices = r try: idx_scale = 0 if len(scale) > 1 and dataset.train: idx_scale = random.randrange(0, len(scale)) dataset.set_scale(idx_scale) samples = collate_fn([dataset[i] for i in batch_indices]) samples.append(idx_scale) except Exception: data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) else: data_queue.put((idx, samples)) del samples except KeyboardInterrupt: pass class _MSDataLoaderIter(_DataLoaderIter): def __init__(self, loader): self.dataset = loader.dataset self.scale = loader.scale self.collate_fn = loader.collate_fn self.batch_sampler = loader.batch_sampler self.num_workers = loader.num_workers self.pin_memory = loader.pin_memory and torch.cuda.is_available() self.timeout = loader.timeout 
self.sample_iter = iter(self.batch_sampler) base_seed = torch.LongTensor(1).random_().item() if self.num_workers > 0: self.worker_init_fn = loader.worker_init_fn self.worker_queue_idx = 0 self.worker_result_queue = multiprocessing.Queue() self.batches_outstanding = 0 self.worker_pids_set = False self.shutdown = False self.send_idx = 0 self.rcvd_idx = 0 self.reorder_dict = {} self.done_event = multiprocessing.Event() base_seed = torch.LongTensor(1).random_()[0] self.index_queues = [] self.workers = [] for i in range(self.num_workers): index_queue = multiprocessing.Queue() index_queue.cancel_join_thread() w = multiprocessing.Process( target=_ms_loop, args=( self.dataset, index_queue, self.worker_result_queue, self.done_event, self.collate_fn, self.scale, base_seed + i, self.worker_init_fn, i ) ) w.daemon = True w.start() self.index_queues.append(index_queue) self.workers.append(w) if self.pin_memory: self.data_queue = queue.Queue() pin_memory_thread = threading.Thread( target=_utils.pin_memory._pin_memory_loop, args=( self.worker_result_queue, self.data_queue, torch.cuda.current_device(), self.done_event ) ) pin_memory_thread.daemon = True pin_memory_thread.start() self.pin_memory_thread = pin_memory_thread else: self.data_queue = self.worker_result_queue _utils.signal_handling._set_worker_pids( id(self), tuple(w.pid for w in self.workers) ) _utils.signal_handling._set_SIGCHLD_handler() self.worker_pids_set = True for _ in range(2 * self.num_workers): self._put_indices() class MSDataLoader(DataLoader): def __init__(self, cfg, *args, **kwargs): super(MSDataLoader, self).__init__( *args, **kwargs, num_workers=cfg.n_threads ) self.scale = cfg.scale def __iter__(self): return _MSDataLoaderIter(self)
5,259
32.081761
104
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/template.py
def set_template(args): # Set the templates here if args.template.find('jpeg') >= 0: args.data_train = 'DIV2K_jpeg' args.data_test = 'DIV2K_jpeg' args.epochs = 200 args.decay = '100' if args.template.find('EDSR_paper') >= 0: args.model = 'EDSR' args.n_resblocks = 32 args.n_feats = 256 args.res_scale = 0.1 if args.template.find('MDSR') >= 0: args.model = 'MDSR' args.patch_size = 48 args.epochs = 650 if args.template.find('DDBPN') >= 0: args.model = 'DDBPN' args.patch_size = 128 args.scale = '4' args.data_test = 'Set5' args.batch_size = 20 args.epochs = 1000 args.decay = '500' args.gamma = 0.1 args.weight_decay = 1e-4 args.loss = '1*MSE' if args.template.find('GAN') >= 0: args.epochs = 200 args.lr = 5e-5 args.decay = '150' if args.template.find('RCAN') >= 0: args.model = 'RCAN' args.n_resgroups = 10 args.n_resblocks = 20 args.n_feats = 64 args.chop = True if args.template.find('VDSR') >= 0: args.model = 'VDSR' args.n_resblocks = 20 args.n_feats = 64 args.patch_size = 41 args.lr = 1e-1
1,312
23.314815
45
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/option.py
"""Command-line options for the DN_RGB (RGB image denoising) experiments.

Builds the argparse parser, applies template presets, then normalizes
list-valued arguments ('+'-separated scales and dataset names).
"""
import argparse
import template

parser = argparse.ArgumentParser(description='EDSR and MDSR')

parser.add_argument('--debug', action='store_true',
                    help='Enables debug mode')
parser.add_argument('--template', default='.',
                    help='You can set various templates in option.py')

# Hardware specifications
parser.add_argument('--n_threads', type=int, default=18,
                    help='number of threads for data loading')
parser.add_argument('--cpu', action='store_true',
                    help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
                    help='number of GPUs')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed')

# Data specifications
parser.add_argument('--dir_data', type=str, default='../../',
                    help='dataset directory')
parser.add_argument('--data_train', type=str, default='DIV2K',
                    help='train dataset name')
parser.add_argument('--data_test', type=str, default='DIV2K',
                    help='test dataset name')
parser.add_argument('--data_range', type=str, default='1-800/801-805',
                    help='train/test data range')
parser.add_argument('--ext', type=str, default='sep',
                    help='dataset file extension')
parser.add_argument('--scale', type=str, default='4',
                    help='super resolution scale')
parser.add_argument('--patch_size', type=int, default=192,
                    help='output patch size')
parser.add_argument('--rgb_range', type=int, default=1,
                    help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
                    help='number of color channels to use')
parser.add_argument('--chop', action='store_true',
                    help='enable memory-efficient forward')
parser.add_argument('--no_augment', action='store_true',
                    help='do not use data augmentation')

# Model specifications
parser.add_argument('--model', default='PANET',
                    help='model name')
parser.add_argument('--act', type=str, default='relu',
                    help='activation function')
parser.add_argument('--pre_train', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--extend', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--n_resblocks', type=int, default=16,
                    help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
                    help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=1,
                    help='residual scaling')
parser.add_argument('--shift_mean', default=True,
                    help='subtract pixel mean from the input')
parser.add_argument('--dilation', action='store_true',
                    help='use dilated convolution')
parser.add_argument('--precision', type=str, default='single',
                    choices=('single', 'half'),
                    help='FP precision for test (single | half)')

# Option for Residual dense network (RDN)
parser.add_argument('--G0', type=int, default=64,
                    help='default number of filters. (Use in RDN)')
parser.add_argument('--RDNkSize', type=int, default=3,
                    help='default kernel size. (Use in RDN)')
parser.add_argument('--RDNconfig', type=str, default='B',
                    help='parameters config of RDN. (Use in RDN)')
parser.add_argument('--depth', type=int, default=12,
                    help='number of residual groups')

# Option for Residual channel attention network (RCAN)
parser.add_argument('--n_resgroups', type=int, default=10,
                    help='number of residual groups')
parser.add_argument('--reduction', type=int, default=16,
                    help='number of feature maps reduction')

# Training specifications
parser.add_argument('--reset', action='store_true',
                    help='reset the training')
parser.add_argument('--test_every', type=int, default=1000,
                    help='do test per every N batches')
parser.add_argument('--epochs', type=int, default=1000,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='input batch size for training')
parser.add_argument('--split_batch', type=int, default=1,
                    help='split the batch into smaller chunks')
parser.add_argument('--self_ensemble', action='store_true',
                    help='use self-ensemble method for test')
parser.add_argument('--test_only', action='store_true',
                    help='set this option to test the model')
parser.add_argument('--gan_k', type=int, default=1,
                    help='k value for adversarial loss')

# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
                    help='learning rate')
parser.add_argument('--decay', type=str, default='200-400-600-800',
                    help='learning rate decay type')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='learning rate decay factor for step decay')
parser.add_argument('--optimizer', default='ADAM',
                    choices=('SGD', 'ADAM', 'RMSprop'),
                    help='optimizer to use (SGD | ADAM | RMSprop)')
parser.add_argument('--momentum', type=float, default=0.9,
                    help='SGD momentum')
# NOTE(review): type=tuple would split a command-line string into characters;
# only the default (0.9, 0.999) is usable as-is — confirm betas are never
# passed on the command line.
parser.add_argument('--betas', type=tuple, default=(0.9, 0.999),
                    help='ADAM beta')
parser.add_argument('--epsilon', type=float, default=1e-8,
                    help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
                    help='weight decay')
parser.add_argument('--gclip', type=float, default=0,
                    help='gradient clipping threshold (0 = no clipping)')

# Loss specifications
parser.add_argument('--loss', type=str, default='1*L1',
                    help='loss function configuration')
# The string default is parsed by argparse via type=float, yielding 1e8.
parser.add_argument('--skip_threshold', type=float, default='1e8',
                    help='skipping batch that has large error')

# Log specifications
parser.add_argument('--save', type=str, default='test',
                    help='file name to save')
parser.add_argument('--load', type=str, default='',
                    help='file name to load')
parser.add_argument('--resume', type=int, default=0,
                    help='resume from specific checkpoint')
parser.add_argument('--save_models', action='store_true',
                    help='save all intermediate models')
parser.add_argument('--print_every', type=int, default=100,
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_results', action='store_true',
                    help='save output results')
parser.add_argument('--save_gt', action='store_true',
                    help='save low-resolution and high-resolution images together')

args = parser.parse_args()
template.set_template(args)

# '+'-separated multi-valued options become lists after parsing.
args.scale = list(map(lambda x: int(x), args.scale.split('+')))
args.data_train = args.data_train.split('+')
args.data_test = args.data_test.split('+')

# epochs == 0 means "train (practically) forever".
if args.epochs == 0:
    args.epochs = 1e8

# Convert string booleans (e.g. from templates/configs) to real booleans.
for arg in vars(args):
    if vars(args)[arg] == 'True':
        vars(args)[arg] = True
    elif vars(args)[arg] == 'False':
        vars(args)[arg] = False
7,465
45.372671
86
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/videotester.py
import os
import math

import utility
from data import common

import torch
import cv2

from tqdm import tqdm

class VideoTester():
    """Run a trained model frame-by-frame over a video and write the result.

    Reads ``args.dir_demo`` with OpenCV, restores each frame at every scale
    in ``args.scale``, and writes one XVID-encoded .avi per scale.
    """
    def __init__(self, args, my_model, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp
        self.model = my_model

        # Output files are named after the input video (extension stripped).
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))

    def test(self):
        # Inference only: disable autograd for the whole pass.
        torch.set_grad_enabled(False)

        self.ckp.write_log('\nEvaluation on video:')
        self.model.eval()

        timer_test = utility.timer()
        for idx_scale, scale in enumerate(self.scale):
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Writer keeps the source FPS; frame size is scaled per model scale.
            vidwri = cv2.VideoWriter(
                self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
                cv2.VideoWriter_fourcc(*'XVID'),
                vidcap.get(cv2.CAP_PROP_FPS),
                (
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                )
            )

            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                success, lr = vidcap.read()
                if not success: break

                # Same preprocessing pipeline as the image datasets.
                lr, = common.set_channel(lr, n_channels=self.args.n_colors)
                lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                lr, = self.prepare(lr.unsqueeze(0))
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

                # Back to uint8 HWC for OpenCV.
                normalized = sr * 255 / self.args.rgb_range
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)

            vidcap.release()
            vidwri.release()

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device, halving precision if requested."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]
2,280
30.246575
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/trainer.py
import os
import math
from decimal import Decimal

import utility

import torch
import torch.nn.utils as utils
from tqdm import tqdm

class Trainer():
    """Drives the train/test loop: optimization, logging, checkpointing."""
    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)

        # When resuming, restore the optimizer state to the logged epoch.
        if self.args.load != '':
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8

    def train(self):
        """Run one training epoch over ``loader_train``."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP
        self.loader_train.dataset.set_scale(0)
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            # Optional gradient clipping (disabled when gclip == 0).
            if self.args.gclip > 0:
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set / scale, log PSNR, save best checkpoint."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        # One log row per test run: [1, n_datasets, n_scales] of PSNR values.
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                # Accumulated PSNR -> average over the dataset.
                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            # is_best compares against the first dataset / first scale only.
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device, halving precision if requested."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when training should stop (also runs test in test-only mode)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
4,820
31.795918
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/loss/adversarial.py
import utility
from types import SimpleNamespace

from model import common
from loss import discriminator

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class Adversarial(nn.Module):
    """Adversarial loss with a built-in discriminator and its own optimizer.

    ``forward`` performs ``gan_k`` discriminator updates as a side effect and
    returns the generator loss. Supported gan_type values: 'GAN', 'WGAN',
    'WGAN_GP', 'RGAN'.
    """
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # see https://arxiv.org/pdf/1704.00028.pdf pp.4
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma
            }
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args

        self.optimizer = utility.make_optimizer(optim_args, self.dis)

    def forward(self, fake, real):
        # updating discriminator...
        self.loss = 0
        fake_detach = fake.detach()     # do not backpropagate through G
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            # d: B x 1 tensor
            d_fake = self.dis(fake_detach)
            d_real = self.dis(real)
            retain_graph = False
            if self.gan_type == 'GAN':
                loss_d = self.bce(d_real, d_fake)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # NOTE(review): rand_like(fake).view(-1, 1, 1, 1) creates
                    # B*C*H*W interpolation coefficients rather than one per
                    # sample (reference WGAN-GP uses torch.rand(B, 1, 1, 1));
                    # confirm the intended broadcasting here.
                    epsilon = torch.rand_like(fake).view(-1, 1, 1, 1)
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.dis(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty
            # from ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks
            elif self.gan_type == 'RGAN':
                better_real = d_real - d_fake.mean(dim=0, keepdim=True)
                better_fake = d_fake - d_real.mean(dim=0, keepdim=True)
                loss_d = self.bce(better_real, better_fake)
                retain_graph = True

            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward(retain_graph=retain_graph)
            self.optimizer.step()

            if self.gan_type == 'WGAN':
                # Plain WGAN enforces the Lipschitz constraint by clipping.
                for p in self.dis.parameters():
                    p.data.clamp_(-1, 1)

        self.loss /= self.gan_k

        # updating generator...
        d_fake_bp = self.dis(fake)      # for backpropagation, use fake as it is
        if self.gan_type == 'GAN':
            label_real = torch.ones_like(d_fake_bp)
            loss_g = F.binary_cross_entropy_with_logits(d_fake_bp, label_real)
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_bp.mean()
        elif self.gan_type == 'RGAN':
            # d_real here is from the last discriminator iteration above.
            better_real = d_real - d_fake_bp.mean(dim=0, keepdim=True)
            better_fake = d_fake_bp - d_real.mean(dim=0, keepdim=True)
            loss_g = self.bce(better_fake, better_real)

        # Generator loss
        return loss_g

    def state_dict(self, *args, **kwargs):
        # Merge discriminator weights and optimizer state into one dict.
        state_discriminator = self.dis.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()

        return dict(**state_discriminator, **state_optimizer)

    def bce(self, real, fake):
        """Standard GAN loss: real vs. ones plus fake vs. zeros (on logits)."""
        label_real = torch.ones_like(real)
        label_fake = torch.zeros_like(fake)
        bce_real = F.binary_cross_entropy_with_logits(real, label_real)
        bce_fake = F.binary_cross_entropy_with_logits(fake, label_fake)
        bce_loss = bce_real + bce_fake
        return bce_loss

# Some references
# https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
# OR
# https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
4,393
37.884956
84
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/loss/discriminator.py
from model import common

import torch.nn as nn

class Discriminator(nn.Module):
    """Patch-image discriminator producing raw (unnormalized) logits.

    Eight conv blocks (conv 3x3 -> BN -> LeakyReLU) that double the channel
    count every second block while downsampling four times in total, followed
    by a two-layer linear classifier.
    """
    def __init__(self, args):
        super(Discriminator, self).__init__()

        depth = 7

        def conv_block(c_in, c_out, stride=1):
            # conv -> batch-norm -> leaky-relu, bias folded into BN.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, 3, padding=1, stride=stride, bias=False),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
            )

        c_in, c_out = args.n_colors, 64
        blocks = [conv_block(c_in, c_out)]
        for step in range(depth):
            c_in = c_out
            if step % 2 == 1:
                stride = 1
                c_out *= 2
            else:
                stride = 2
            blocks.append(conv_block(c_in, c_out, stride=stride))

        # Four stride-2 blocks shrink each spatial side by 2**4.
        feat_hw = args.patch_size // (2 ** ((depth + 1) // 2))
        head = [
            nn.Linear(c_out * feat_hw ** 2, 1024),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Linear(1024, 1),
        ]

        self.features = nn.Sequential(*blocks)
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        h = self.features(x)
        return self.classifier(h.view(h.size(0), -1))
1,595
27.5
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/loss/vgg.py
from model import common

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models

class VGG(nn.Module):
    """Perceptual loss: MSE between frozen VGG19 features of SR and HR.

    ``conv_index`` selects the feature layer: '22' -> relu2_2 (first 8
    modules), '54' -> relu5_4 (first 35 modules). Inputs are ImageNet-
    normalized via MeanShift before the VGG forward pass.
    """
    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if conv_index.find('22') >= 0:
            self.vgg = nn.Sequential(*modules[:8])
        elif conv_index.find('54') >= 0:
            self.vgg = nn.Sequential(*modules[:35])

        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # The VGG trunk is a fixed feature extractor; never train it.
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x

        vgg_sr = _forward(sr)
        # HR features carry no gradient; only SR is optimized.
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())

        loss = F.mse_loss(vgg_sr, vgg_hr)

        return loss
1,106
28.918919
75
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/loss/__init__.py
import os
from importlib import import_module

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

class Loss(nn.modules.loss._Loss):
    """Weighted sum of loss terms parsed from a spec like '1*L1+0.1*VGG54'.

    Keeps a per-epoch log tensor (rows = epochs, cols = loss terms) with
    extra synthetic columns: 'DIS' mirrors the discriminator loss of a GAN
    term, 'Total' holds the weighted sum when more than one term exists.
    """
    def __init__(self, args, ckp):
        super(Loss, self).__init__()
        print('Preparing loss function:')

        self.n_GPUs = args.n_GPUs
        self.loss = []
        self.loss_module = nn.ModuleList()
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'MSE':
                loss_function = nn.MSELoss()
            elif loss_type == 'L1':
                loss_function = nn.L1Loss()
            elif loss_type.find('VGG') >= 0:
                module = import_module('loss.vgg')
                loss_function = getattr(module, 'VGG')(
                    loss_type[3:],
                    rgb_range=args.rgb_range
                )
            elif loss_type.find('GAN') >= 0:
                module = import_module('loss.adversarial')
                loss_function = getattr(module, 'Adversarial')(
                    args,
                    loss_type
                )

            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function}
            )
            # GAN terms also log the discriminator loss under 'DIS'.
            if loss_type.find('GAN') >= 0:
                self.loss.append({'type': 'DIS', 'weight': 1, 'function': None})

        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])

        self.log = torch.Tensor()

        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.precision == 'half': self.loss_module.half()
        if not args.cpu and args.n_GPUs > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.n_GPUs)
            )

        if args.load != '': self.load(ckp.dir, cpu=args.cpu)

    def forward(self, sr, hr):
        """Return the weighted loss sum; accumulate each term into the log."""
        losses = []
        for i, l in enumerate(self.loss):
            if l['function'] is not None:
                loss = l['function'](sr, hr)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif l['type'] == 'DIS':
                # Adversarial terms stash their D loss on the function object.
                self.log[-1, i] += self.loss[i - 1]['function'].loss

        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()

        return loss_sum

    def step(self):
        # Advance any per-loss schedulers (e.g. the adversarial optimizer's).
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()

    def start_log(self):
        # Append a fresh zero row for the new epoch.
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))

    def end_log(self, n_batches):
        # Convert the accumulated sums into per-batch averages.
        self.log[-1].div_(n_batches)

    def display_loss(self, batch):
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))

        return ''.join(log)

    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch PDF per loss term under ``apath``."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type'])))
            plt.close(fig)

    def get_loss_module(self):
        # Unwrap DataParallel when running on multiple GPUs.
        if self.n_GPUs == 1:
            return self.loss_module
        else:
            return self.loss_module.module

    def save(self, apath):
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))

    def load(self, apath, cpu=False):
        if cpu:
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}

        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        # Fast-forward schedulers to the resumed epoch count.
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
4,659
31.361111
80
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/utils/tools.py
import os
import torch
import numpy as np
from PIL import Image  # NOTE: unused in this module chunk; kept for compatibility
import torch.nn.functional as F


def normalize(x):
    """Map a tensor from [0, 1] to [-1, 1] in place and return it."""
    return x.mul_(2).add_(-1)


def same_padding(images, ksizes, strides, rates):
    """Zero-pad an (N, C, H, W) batch so a sliding window covers every pixel.

    Mimics TensorFlow 'SAME' padding for the given kernel size, stride and
    dilation; the extra pixel of odd padding goes to the bottom/right.
    """
    assert len(images.size()) == 4
    batch_size, channel, rows, cols = images.size()
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    effective_k_row = (ksizes[0] - 1) * rates[0] + 1
    effective_k_col = (ksizes[1] - 1) * rates[1] + 1
    padding_rows = max(0, (out_rows - 1) * strides[0] + effective_k_row - rows)
    padding_cols = max(0, (out_cols - 1) * strides[1] + effective_k_col - cols)
    # Pad the input
    padding_top = int(padding_rows / 2.)
    padding_left = int(padding_cols / 2.)
    padding_bottom = padding_rows - padding_top
    padding_right = padding_cols - padding_left
    paddings = (padding_left, padding_right, padding_top, padding_bottom)
    images = torch.nn.ZeroPad2d(paddings)(images)
    return images


def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Extract patches from images and put them in the C output dimension.
    :param padding: 'same' (pad so every pixel is covered) or 'valid'.
    :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape
    :param ksizes: [ksize_rows, ksize_cols]. The size of the
     sliding window for each dimension of images
    :param strides: [stride_rows, stride_cols]
    :param rates: [dilation_rows, dilation_cols]
    :return: A Tensor of shape [N, C*k*k, L], L = number of windows
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']
    batch_size, channel, height, width = images.size()

    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding == 'valid':
        pass
    else:
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))

    unfold = torch.nn.Unfold(kernel_size=ksizes,
                             dilation=rates,
                             padding=0,
                             stride=strides)
    patches = unfold(images)
    return patches  # [N, C*k*k, L], L is the total number of such blocks


def _reduction_axes(axis, ndim):
    """Normalize ``axis`` into a descending list of dims to reduce.

    Accepts None (all dims), a single int, or an iterable of ints.
    Bug fix: the previous ``if not axis`` truthiness test treated ``axis=0``
    like None (reducing every dimension), and a bare int crashed ``sorted``.
    """
    if axis is None:
        axis = range(ndim)
    elif isinstance(axis, int):
        axis = [axis]
    # Reduce highest dims first so remaining indices stay valid.
    return sorted(axis, reverse=True)


def reduce_mean(x, axis=None, keepdim=False):
    """Mean of ``x`` over ``axis`` (None = all dims), tf.reduce_mean style."""
    for i in _reduction_axes(axis, x.dim()):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x


def reduce_std(x, axis=None, keepdim=False):
    """Standard deviation of ``x`` over ``axis`` (None = all dims)."""
    for i in _reduction_axes(axis, x.dim()):
        x = torch.std(x, dim=i, keepdim=keepdim)
    return x


def reduce_sum(x, axis=None, keepdim=False):
    """Sum of ``x`` over ``axis`` (None = all dims), tf.reduce_sum style."""
    for i in _reduction_axes(axis, x.dim()):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
2,777
32.878049
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/utils/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/div2kjpeg.py
import os
from data import srdata
from data import div2k

class DIV2KJPEG(div2k.DIV2K):
    """DIV2K variant whose LR inputs are JPEG-compressed at a given quality."""
    def __init__(self, args, name='', train=True, benchmark=False):
        # Dataset name encodes the JPEG quality factor, e.g. 'DIV2K-Q75' -> 75.
        self.q_factor = int(name.replace('DIV2K-Q', ''))
        super(DIV2KJPEG, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )

    def _set_filesystem(self, dir_data):
        # HR images are the standard DIV2K HR set; LR lives in a per-quality dir.
        self.apath = os.path.join(dir_data, 'DIV2K')
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(
            self.apath, 'DIV2K_Q{}'.format(self.q_factor)
        )
        if self.input_large: self.dir_lr += 'L'
        # (HR extension, LR extension): LR files are JPEGs.
        self.ext = ('.png', '.jpg')
675
31.190476
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/sr291.py
from data import srdata

class SR291(srdata.SRData):
    """291-image training set (T91 + BSD200) wrapped as an ``SRData`` dataset."""
    def __init__(self, args, name='SR291', train=True, benchmark=False):
        # Bug fix: forward train/benchmark to the base class. The original
        # dropped both flags, so SR291 was always constructed in training
        # mode — inconsistent with the other dataset wrappers (e.g. Benchmark).
        super(SR291, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )
180
24.857143
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/benchmark.py
import os from data import common from data import srdata import numpy as np import torch import torch.utils.data as data class Benchmark(srdata.SRData): def __init__(self, args, name='', train=True, benchmark=True): super(Benchmark, self).__init__( args, name=name, train=train, benchmark=True ) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, 'benchmark', self.name) self.dir_hr = os.path.join(self.apath, 'HR') if self.input_large: self.dir_lr = os.path.join(self.apath, 'LR_bicubicL') else: self.dir_lr = os.path.join(self.apath, 'LR_bicubic') self.ext = ('', '.png')
703
26.076923
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/video.py
import os

from data import common

import cv2
import numpy as np
import imageio

import torch
import torch.utils.data as data

class Video(data.Dataset):
    """Wrap a video file (``args.dir_demo``) as a frame-by-frame test dataset.

    Frames are read sequentially from a single shared VideoCapture, so this
    dataset must be iterated in order with a single worker.
    """
    def __init__(self, args, name='Video', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.do_eval = False
        self.benchmark = benchmark

        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
        self.vidcap = cv2.VideoCapture(args.dir_demo)
        self.n_frames = 0
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __getitem__(self, idx):
        success, lr = self.vidcap.read()
        if success:
            self.n_frames += 1
            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            # -1 stands in for the (nonexistent) HR target.
            return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
        else:
            # Bug fix: the original called ``vidcap.release()`` on an
            # undefined local name, raising NameError once the video ended;
            # the capture object lives on the instance.
            self.vidcap.release()
            return None

    def __len__(self):
        return self.total_frames

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
1,207
25.844444
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/srdata.py
import os
import glob
import random
import pickle

from data import common

import numpy as np
import imageio
import torch
import torch.utils.data as data

class SRData(data.Dataset):
    """Base class for paired LR/HR image datasets.

    Handles scanning the HR/LR directories, optional binarization of images
    into pickled '.pt' files under <apath>/bin (args.ext contains 'sep'),
    random patch extraction + augmentation for training, and an epoch
    'repeat' factor so each epoch yields ~batch_size * test_every patches.
    """
    def __init__(self, args, name='', train=True, benchmark=False):
        self.args = args
        self.name = name
        self.train = train
        self.split = 'train' if train else 'test'
        self.do_eval = True
        self.benchmark = benchmark
        # VDSR operates on pre-upscaled ("large") inputs.
        self.input_large = (args.model == 'VDSR')
        self.scale = args.scale
        self.idx_scale = 0

        self._set_filesystem(args.dir_data)
        if args.ext.find('img') < 0:
            path_bin = os.path.join(self.apath, 'bin')
            os.makedirs(path_bin, exist_ok=True)

        list_hr, list_lr = self._scan()
        if args.ext.find('img') >= 0 or benchmark:
            # Raw image mode: keep the original file paths.
            self.images_hr, self.images_lr = list_hr, list_lr
        elif args.ext.find('sep') >= 0:
            # Binary-cache mode: mirror the directory tree under bin/ with
            # pickled ndarrays for faster loading.
            os.makedirs(
                self.dir_hr.replace(self.apath, path_bin),
                exist_ok=True
            )
            for s in self.scale:
                os.makedirs(
                    os.path.join(
                        self.dir_lr.replace(self.apath, path_bin),
                        'X{}'.format(s)
                    ),
                    exist_ok=True
                )

            self.images_hr, self.images_lr = [], [[] for _ in self.scale]
            for h in list_hr:
                b = h.replace(self.apath, path_bin)
                b = b.replace(self.ext[0], '.pt')
                self.images_hr.append(b)
                self._check_and_load(args.ext, h, b, verbose=True)
            for i, ll in enumerate(list_lr):
                for l in ll:
                    b = l.replace(self.apath, path_bin)
                    b = b.replace(self.ext[1], '.pt')
                    self.images_lr[i].append(b)
                    self._check_and_load(args.ext, l, b, verbose=True)
        if train:
            # Stretch the dataset so one epoch covers roughly
            # batch_size * test_every patches.
            n_patches = args.batch_size * args.test_every
            n_images = len(args.data_train) * len(self.images_hr)
            if n_images == 0:
                self.repeat = 0
            else:
                self.repeat = max(n_patches // n_images, 1)

    # Below functions as used to prepare images
    def _scan(self):
        """Return (HR paths, per-scale LR path lists), matched by filename."""
        names_hr = sorted(
            glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0]))
        )
        names_lr = [[] for _ in self.scale]
        for f in names_hr:
            filename, _ = os.path.splitext(os.path.basename(f))
            for si, s in enumerate(self.scale):
                names_lr[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}{}'.format(
                        s, filename, self.ext[1]
                    )
                ))

        return names_hr, names_lr

    def _set_filesystem(self, dir_data):
        """Set apath/dir_hr/dir_lr/ext; subclasses override for other layouts."""
        self.apath = os.path.join(dir_data, self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        if self.input_large: self.dir_lr += 'L'
        self.ext = ('.png', '.png')

    def _check_and_load(self, ext, img, f, verbose=True):
        # (Re)create the pickled cache file when missing or when 'reset' is set.
        if not os.path.isfile(f) or ext.find('reset') >= 0:
            if verbose:
                print('Making a binary: {}'.format(f))
            with open(f, 'wb') as _f:
                pickle.dump(imageio.imread(img), _f)

    def __getitem__(self, idx):
        lr, hr, filename = self._load_file(idx)
        pair = self.get_patch(lr, hr)
        pair = common.set_channel(*pair, n_channels=self.args.n_colors)
        pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)

        return pair_t[0], pair_t[1], filename

    def __len__(self):
        if self.train:
            return len(self.images_hr) * self.repeat
        else:
            return len(self.images_hr)

    def _get_index(self, idx):
        # Wrap the stretched training index back onto the real image list.
        if self.train:
            return idx % len(self.images_hr)
        else:
            return idx

    def _load_file(self, idx):
        """Load one (lr, hr) ndarray pair plus its base filename."""
        idx = self._get_index(idx)
        f_hr = self.images_hr[idx]
        f_lr = self.images_lr[self.idx_scale][idx]

        filename, _ = os.path.splitext(os.path.basename(f_hr))
        if self.args.ext == 'img' or self.benchmark:
            hr = imageio.imread(f_hr)
            lr = imageio.imread(f_lr)
        elif self.args.ext.find('sep') >= 0:
            with open(f_hr, 'rb') as _f:
                hr = pickle.load(_f)
            with open(f_lr, 'rb') as _f:
                lr = pickle.load(_f)

        return lr, hr, filename

    def get_patch(self, lr, hr):
        """Random aligned patch + augmentation (train) or HR crop (eval)."""
        scale = self.scale[self.idx_scale]
        if self.train:
            lr, hr = common.get_patch(
                lr, hr,
                patch_size=self.args.patch_size,
                scale=scale,
                multi=(len(self.scale) > 1),
                input_large=self.input_large
            )
            if not self.args.no_augment: lr, hr = common.augment(lr, hr)
        else:
            # Trim HR so its size is an exact multiple of the LR size.
            ih, iw = lr.shape[:2]
            hr = hr[0:ih * scale, 0:iw * scale]

        return lr, hr

    def set_scale(self, idx_scale):
        if not self.input_large:
            self.idx_scale = idx_scale
        else:
            # Pre-upscaled inputs: pick a random scale per batch.
            self.idx_scale = random.randint(0, len(self.scale) - 1)
5,337
32.78481
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/demo.py
import os

from data import common

import numpy as np
import imageio

import torch
import torch.utils.data as data

class Demo(data.Dataset):
    """Inference-only dataset: every .png/.jpg/.jpeg in ``args.dir_demo``.

    Yields (lr_tensor, -1, filename); -1 stands in for the missing HR target.
    """
    def __init__(self, args, name='Demo', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = benchmark

        self.filelist = []
        for f in os.listdir(args.dir_demo):
            # '.jp' matches both .jpg and .jpeg extensions.
            if f.find('.png') >= 0 or f.find('.jp') >= 0:
                self.filelist.append(os.path.join(args.dir_demo, f))
        self.filelist.sort()

    def __getitem__(self, idx):
        filename = os.path.splitext(os.path.basename(self.filelist[idx]))[0]
        lr = imageio.imread(self.filelist[idx])
        lr, = common.set_channel(lr, n_channels=self.args.n_colors)
        lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)

        return lr_t, -1, filename

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
1,075
25.9
76
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/common.py
import random import numpy as np import skimage.color as sc import torch def get_patch(*args, patch_size=96, scale=1, multi=False, input_large=False): ih, iw = args[0].shape[:2] if not input_large: p = 1 if multi else 1 tp = p * patch_size ip = tp // 1 else: tp = patch_size ip = patch_size ix = random.randrange(0, iw - ip + 1) iy = random.randrange(0, ih - ip + 1) if not input_large: tx, ty = 1 * ix, 1 * iy else: tx, ty = ix, iy ret = [ args[0][iy:iy + ip, ix:ix + ip, :], *[a[ty:ty + tp, tx:tx + tp, :] for a in args[1:]] ] return ret def set_channel(*args, n_channels=3): def _set_channel(img): if img.ndim == 2: img = np.expand_dims(img, axis=2) c = img.shape[2] if n_channels == 1 and c == 3: img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2) elif n_channels == 3 and c == 1: img = np.concatenate([img] * n_channels, 2) return img return [_set_channel(a) for a in args] def np2Tensor(*args, rgb_range=255): def _np2Tensor(img): np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1))) tensor = torch.from_numpy(np_transpose).float() tensor.mul_(rgb_range / 255) return tensor return [_np2Tensor(a) for a in args] def augment(*args, hflip=True, rot=True): hflip = hflip and random.random() < 0.5 vflip = rot and random.random() < 0.5 rot90 = rot and random.random() < 0.5 def _augment(img): if hflip: img = img[:, ::-1, :] if vflip: img = img[::-1, :, :] if rot90: img = img.transpose(1, 0, 2) return img return [_augment(a) for a in args]
1,770
23.260274
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/__init__.py
from importlib import import_module #from dataloader import MSDataLoader from torch.utils.data import dataloader from torch.utils.data import ConcatDataset # This is a simple wrapper function for ConcatDataset class MyConcatDataset(ConcatDataset): def __init__(self, datasets): super(MyConcatDataset, self).__init__(datasets) self.train = datasets[0].train def set_scale(self, idx_scale): for d in self.datasets: if hasattr(d, 'set_scale'): d.set_scale(idx_scale) class Data: def __init__(self, args): self.loader_train = None if not args.test_only: datasets = [] for d in args.data_train: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) datasets.append(getattr(m, module_name)(args, name=d)) self.loader_train = dataloader.DataLoader( MyConcatDataset(datasets), batch_size=args.batch_size, shuffle=True, pin_memory=not args.cpu, num_workers=args.n_threads, ) self.loader_test = [] for d in args.data_test: if d in ['CBSD68','Kodak24','Set5', 'Set14', 'B100', 'Urban100']: m = import_module('data.benchmark') testset = getattr(m, 'Benchmark')(args, train=False, name=d) else: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) testset = getattr(m, module_name)(args, train=False, name=d) self.loader_test.append( dataloader.DataLoader( testset, batch_size=1, shuffle=False, pin_memory=not args.cpu, num_workers=args.n_threads, ) )
1,968
36.150943
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/data/div2k.py
import os from data import srdata class DIV2K(srdata.SRData): def __init__(self, args, name='DIV2K', train=True, benchmark=False): data_range = [r.split('-') for r in args.data_range.split('/')] if train: data_range = data_range[0] else: if args.test_only and len(data_range) == 1: data_range = data_range[0] else: data_range = data_range[1] self.begin, self.end = list(map(lambda x: int(x), data_range)) super(DIV2K, self).__init__( args, name=name, train=train, benchmark=benchmark ) def _scan(self): names_hr, names_lr = super(DIV2K, self)._scan() names_hr = names_hr[self.begin - 1:self.end] names_lr = [n[self.begin - 1:self.end] for n in names_lr] return names_hr, names_lr def _set_filesystem(self, dir_data): super(DIV2K, self)._set_filesystem(dir_data) self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic') if self.input_large: self.dir_lr += 'L'
1,134
33.393939
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/rcan.py
## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks ## https://arxiv.org/abs/1807.02758 from model import common import torch.nn as nn def make_model(args, parent=False): return RCAN(args) ## Channel Attention (CA) Layer class CALayer(nn.Module): def __init__(self, channel, reduction=16): super(CALayer, self).__init__() # global average pooling: feature --> point self.avg_pool = nn.AdaptiveAvgPool2d(1) # feature channel downscale and upscale --> channel weight self.conv_du = nn.Sequential( nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True), nn.ReLU(inplace=True), nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True), nn.Sigmoid() ) def forward(self, x): y = self.avg_pool(x) y = self.conv_du(y) return x * y ## Residual Channel Attention Block (RCAB) class RCAB(nn.Module): def __init__( self, conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(RCAB, self).__init__() modules_body = [] for i in range(2): modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias)) if bn: modules_body.append(nn.BatchNorm2d(n_feat)) if i == 0: modules_body.append(act) modules_body.append(CALayer(n_feat, reduction)) self.body = nn.Sequential(*modules_body) self.res_scale = res_scale def forward(self, x): res = self.body(x) #res = self.body(x).mul(self.res_scale) res += x return res ## Residual Group (RG) class ResidualGroup(nn.Module): def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks): super(ResidualGroup, self).__init__() modules_body = [] modules_body = [ RCAB( conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \ for _ in range(n_resblocks)] modules_body.append(conv(n_feat, n_feat, kernel_size)) self.body = nn.Sequential(*modules_body) def forward(self, x): res = self.body(x) res += x return res ## Residual Channel Attention Network (RCAN) class RCAN(nn.Module): def __init__(self, args, 
conv=common.default_conv): super(RCAN, self).__init__() n_resgroups = args.n_resgroups n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 reduction = args.reduction scale = args.scale[0] act = nn.ReLU(True) # RGB mean for DIV2K rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) # define head module modules_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module modules_body = [ ResidualGroup( conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \ for _ in range(n_resgroups)] modules_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module modules_tail = [ common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*modules_head) self.body = nn.Sequential(*modules_body) self.tail = nn.Sequential(*modules_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=False): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') >= 0: print('Replace pre-trained upsampler to new one...') else: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name)) if strict: missing = set(own_state.keys()) - set(state_dict.keys()) if len(missing) > 0: raise KeyError('missing keys in state_dict: "{}"'.format(missing))
5,178
34.717241
116
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/ddbpn.py
# Deep Back-Projection Networks For Super-Resolution # https://arxiv.org/abs/1803.02735 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return DDBPN(args) def projection_conv(in_channels, out_channels, scale, up=True): kernel_size, stride, padding = { 2: (6, 2, 2), 4: (8, 4, 2), 8: (12, 8, 2) }[scale] if up: conv_f = nn.ConvTranspose2d else: conv_f = nn.Conv2d return conv_f( in_channels, out_channels, kernel_size, stride=stride, padding=padding ) class DenseProjection(nn.Module): def __init__(self, in_channels, nr, scale, up=True, bottleneck=True): super(DenseProjection, self).__init__() if bottleneck: self.bottleneck = nn.Sequential(*[ nn.Conv2d(in_channels, nr, 1), nn.PReLU(nr) ]) inter_channels = nr else: self.bottleneck = None inter_channels = in_channels self.conv_1 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) self.conv_2 = nn.Sequential(*[ projection_conv(nr, inter_channels, scale, not up), nn.PReLU(inter_channels) ]) self.conv_3 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) def forward(self, x): if self.bottleneck is not None: x = self.bottleneck(x) a_0 = self.conv_1(x) b_0 = self.conv_2(a_0) e = b_0.sub(x) a_1 = self.conv_3(e) out = a_0.add(a_1) return out class DDBPN(nn.Module): def __init__(self, args): super(DDBPN, self).__init__() scale = args.scale[0] n0 = 128 nr = 32 self.depth = 6 rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) initial = [ nn.Conv2d(args.n_colors, n0, 3, padding=1), nn.PReLU(n0), nn.Conv2d(n0, nr, 1), nn.PReLU(nr) ] self.initial = nn.Sequential(*initial) self.upmodules = nn.ModuleList() self.downmodules = nn.ModuleList() channels = nr for i in range(self.depth): self.upmodules.append( DenseProjection(channels, nr, scale, True, i > 1) ) if i != 0: channels += nr channels = nr for i in range(self.depth - 1): self.downmodules.append( 
DenseProjection(channels, nr, scale, False, i != 0) ) channels += nr reconstruction = [ nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1) ] self.reconstruction = nn.Sequential(*reconstruction) self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) def forward(self, x): x = self.sub_mean(x) x = self.initial(x) h_list = [] l_list = [] for i in range(self.depth - 1): if i == 0: l = x else: l = torch.cat(l_list, dim=1) h_list.append(self.upmodules[i](l)) l_list.append(self.downmodules[i](torch.cat(h_list, dim=1))) h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1))) out = self.reconstruction(torch.cat(h_list, dim=1)) out = self.add_mean(out) return out
3,629
26.5
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/rdn.py
# Residual Dense Network for Image Super-Resolution # https://arxiv.org/abs/1802.08797 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return RDN(args) class RDB_Conv(nn.Module): def __init__(self, inChannels, growRate, kSize=3): super(RDB_Conv, self).__init__() Cin = inChannels G = growRate self.conv = nn.Sequential(*[ nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1), nn.ReLU() ]) def forward(self, x): out = self.conv(x) return torch.cat((x, out), 1) class RDB(nn.Module): def __init__(self, growRate0, growRate, nConvLayers, kSize=3): super(RDB, self).__init__() G0 = growRate0 G = growRate C = nConvLayers convs = [] for c in range(C): convs.append(RDB_Conv(G0 + c*G, G)) self.convs = nn.Sequential(*convs) # Local Feature Fusion self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1) def forward(self, x): return self.LFF(self.convs(x)) + x class RDN(nn.Module): def __init__(self, args): super(RDN, self).__init__() r = args.scale[0] G0 = args.G0 kSize = args.RDNkSize # number of RDB blocks, conv layers, out channels self.D, C, G = { 'A': (20, 6, 32), 'B': (16, 8, 64), }[args.RDNconfig] # Shallow feature extraction net self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1) self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) # Redidual dense blocks and dense feature fusion self.RDBs = nn.ModuleList() for i in range(self.D): self.RDBs.append( RDB(growRate0 = G0, growRate = G, nConvLayers = C) ) # Global Feature Fusion self.GFF = nn.Sequential(*[ nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1), nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) ]) # Up-sampling net if r == 2 or r == 3: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(r), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) elif r == 4: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, 
stride=1), nn.PixelShuffle(2), nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(2), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) else: raise ValueError("scale must be 2 or 3 or 4.") def forward(self, x): f__1 = self.SFENet1(x) x = self.SFENet2(f__1) RDBs_out = [] for i in range(self.D): x = self.RDBs[i](x) RDBs_out.append(x) x = self.GFF(torch.cat(RDBs_out,1)) x += f__1 return self.UPNet(x)
3,202
29.216981
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/mdsr.py
from model import common import torch.nn as nn def make_model(args, parent=False): return MDSR(args) class MDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(MDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.scale_idx = 0 act = nn.ReLU(True) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) m_head = [conv(args.n_colors, n_feats, kernel_size)] self.pre_process = nn.ModuleList([ nn.Sequential( common.ResBlock(conv, n_feats, 5, act=act), common.ResBlock(conv, n_feats, 5, act=act) ) for _ in args.scale ]) m_body = [ common.ResBlock( conv, n_feats, kernel_size, act=act ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) self.upsample = nn.ModuleList([ common.Upsampler( conv, s, n_feats, act=False ) for s in args.scale ]) m_tail = [conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) x = self.pre_process[self.scale_idx](x) res = self.body(x) res += x x = self.upsample[self.scale_idx](res) x = self.tail(x) x = self.add_mean(x) return x def set_scale(self, scale_idx): self.scale_idx = scale_idx
1,837
25.637681
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/common.py
import math import torch import torch.nn as nn import torch.nn.functional as F def default_conv(in_channels, out_channels, kernel_size,stride=1, bias=True): return nn.Conv2d( in_channels, out_channels, kernel_size, padding=(kernel_size//2),stride=stride, bias=bias) class MeanShift(nn.Conv2d): def __init__( self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1): super(MeanShift, self).__init__(3, 3, kernel_size=1) std = torch.Tensor(rgb_std) self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1) self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std for p in self.parameters(): p.requires_grad = False class BasicBlock(nn.Sequential): def __init__( self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True, bn=False, act=nn.PReLU()): m = [conv(in_channels, out_channels, kernel_size, bias=bias)] if bn: m.append(nn.BatchNorm2d(out_channels)) if act is not None: m.append(act) super(BasicBlock, self).__init__(*m) class ResBlock(nn.Module): def __init__( self, conv, n_feats, kernel_size, bias=True, bn=False, act=nn.PReLU(), res_scale=1): super(ResBlock, self).__init__() m = [] for i in range(2): m.append(conv(n_feats, n_feats, kernel_size, bias=bias)) if bn: m.append(nn.BatchNorm2d(n_feats)) if i == 0: m.append(act) self.body = nn.Sequential(*m) self.res_scale = res_scale def forward(self, x): res = self.body(x).mul(self.res_scale) res += x return res class Upsampler(nn.Sequential): def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True): m = [] if (scale & (scale - 1)) == 0: # Is scale = 2^n? 
for _ in range(int(math.log(scale, 2))): m.append(conv(n_feats, 4 * n_feats, 3, bias)) m.append(nn.PixelShuffle(2)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) elif scale == 3: m.append(conv(n_feats, 9 * n_feats, 3, bias)) m.append(nn.PixelShuffle(3)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) else: raise NotImplementedError super(Upsampler, self).__init__(*m)
2,799
30.460674
80
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/__init__.py
import os from importlib import import_module import torch import torch.nn as nn from torch.autograd import Variable class Model(nn.Module): def __init__(self, args, ckp): super(Model, self).__init__() print('Making model...') self.scale = args.scale self.idx_scale = 0 self.self_ensemble = args.self_ensemble self.chop = args.chop self.precision = args.precision self.cpu = args.cpu self.device = torch.device('cpu' if args.cpu else 'cuda') self.n_GPUs = args.n_GPUs self.save_models = args.save_models module = import_module('model.' + args.model.lower()) self.model = module.make_model(args).to(self.device) if args.precision == 'half': self.model.half() if not args.cpu and args.n_GPUs > 1: self.model = nn.DataParallel(self.model, range(args.n_GPUs)) self.load( ckp.dir, pre_train=args.pre_train, resume=args.resume, cpu=args.cpu ) print(self.model, file=ckp.log_file) def forward(self, x, idx_scale): self.idx_scale = idx_scale target = self.get_model() if hasattr(target, 'set_scale'): target.set_scale(idx_scale) if self.self_ensemble and not self.training: if self.chop: forward_function = self.forward_chop else: forward_function = self.model.forward return self.forward_x8(x, forward_function) elif self.chop and not self.training: return self.forward_chop(x) else: return self.model(x) def get_model(self): if self.n_GPUs == 1: return self.model else: return self.model.module def state_dict(self, **kwargs): target = self.get_model() return target.state_dict(**kwargs) def save(self, apath, epoch, is_best=False): target = self.get_model() torch.save( target.state_dict(), os.path.join(apath, 'model_latest.pt') ) if is_best: torch.save( target.state_dict(), os.path.join(apath, 'model_best.pt') ) if self.save_models: torch.save( target.state_dict(), os.path.join(apath, 'model_{}.pt'.format(epoch)) ) def load(self, apath, pre_train='.', resume=-1, cpu=False): if cpu: kwargs = {'map_location': lambda storage, loc: storage} else: kwargs = {} if resume == -1: 
self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model_latest.pt'), **kwargs ), strict=False ) elif resume == 0: if pre_train != '.': print('Loading model from {}'.format(pre_train)) self.get_model().load_state_dict( torch.load(pre_train, **kwargs), strict=False ) else: self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model', 'model_{}.pt'.format(resume)), **kwargs ), strict=False ) def forward_chop(self, x, shave=10, min_size=6800): scale = self.scale[self.idx_scale] scale = 1 n_GPUs = min(self.n_GPUs, 4) b, c, h, w = x.size() h_half, w_half = h // 2, w // 2 h_size, w_size = h_half + shave, w_half + shave lr_list = [ x[:, :, 0:h_size, 0:w_size], x[:, :, 0:h_size, (w - w_size):w], x[:, :, (h - h_size):h, 0:w_size], x[:, :, (h - h_size):h, (w - w_size):w]] if w_size * h_size < min_size: sr_list = [] for i in range(0, 4, n_GPUs): lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0) sr_batch = self.model(lr_batch) sr_list.extend(sr_batch.chunk(n_GPUs, dim=0)) else: sr_list = [ self.forward_chop(patch, shave=shave, min_size=min_size) \ for patch in lr_list ] h, w = scale * h, scale * w h_half, w_half = scale * h_half, scale * w_half h_size, w_size = scale * h_size, scale * w_size shave *= scale output = x.new(b, c, h, w) output[:, :, 0:h_half, 0:w_half] \ = sr_list[0][:, :, 0:h_half, 0:w_half] output[:, :, 0:h_half, w_half:w] \ = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size] output[:, :, h_half:h, 0:w_half] \ = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half] output[:, :, h_half:h, w_half:w] \ = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size] return output def forward_x8(self, x, forward_function): def _transform(v, op): if self.precision != 'single': v = v.float() v2np = v.data.cpu().numpy() if op == 'v': tfnp = v2np[:, :, :, ::-1].copy() elif op == 'h': tfnp = v2np[:, :, ::-1, :].copy() elif op == 't': tfnp = v2np.transpose((0, 1, 3, 2)).copy() ret = torch.Tensor(tfnp).to(self.device) 
if self.precision == 'half': ret = ret.half() return ret lr_list = [x] for tf in 'v', 'h', 't': lr_list.extend([_transform(t, tf) for t in lr_list]) sr_list = [forward_function(aug) for aug in lr_list] for i in range(len(sr_list)): if i > 3: sr_list[i] = _transform(sr_list[i], 't') if i % 4 > 1: sr_list[i] = _transform(sr_list[i], 'h') if (i % 4) % 2 == 1: sr_list[i] = _transform(sr_list[i], 'v') output_cat = torch.cat(sr_list, dim=0) output = output_cat.mean(dim=0, keepdim=True) return output
6,200
31.465969
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/panet.py
from model import common from model import attention import torch.nn as nn def make_model(args, parent=False): return PANET(args) class PANET(nn.Module): def __init__(self, args, conv=common.default_conv): super(PANET, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 scale = args.scale[0] rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) msa = attention.PyramidAttention() # define head module m_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale ) for _ in range(n_resblocks//2) ] m_body.append(msa) for i in range(n_resblocks//2): m_body.append(common.ResBlock(conv,n_feats,kernel_size,nn.PReLU(),res_scale=args.res_scale)) m_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module #m_tail = [ # common.Upsampler(conv, scale, n_feats, act=False), # conv(n_feats, args.n_colors, kernel_size) #] m_tail = [ conv(n_feats, args.n_colors, kernel_size) ] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): #x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) #x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name))
2,779
32.493976
104
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/attention.py
import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms from torchvision import utils as vutils from model import common from utils.tools import extract_image_patches,\ reduce_mean, reduce_sum, same_padding class PyramidAttention(nn.Module): def __init__(self, level=5, res_scale=1, channel=64, reduction=2, ksize=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv): super(PyramidAttention, self).__init__() self.ksize = ksize self.stride = stride self.res_scale = res_scale self.softmax_scale = softmax_scale self.scale = [1-i/10 for i in range(level)] self.average = average escape_NaN = torch.FloatTensor([1e-4]) self.register_buffer('escape_NaN', escape_NaN) self.conv_match_L_base = common.BasicBlock(conv,channel,channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_match = common.BasicBlock(conv,channel, channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_assembly = common.BasicBlock(conv,channel, channel,1,bn=False, act=nn.PReLU()) def forward(self, input): res = input #theta match_base = self.conv_match_L_base(input) shape_base = list(res.size()) input_groups = torch.split(match_base,1,dim=0) # patch size for matching kernel = self.ksize # raw_w is for reconstruction raw_w = [] # w is for matching w = [] #build feature pyramid for i in range(len(self.scale)): ref = input if self.scale[i]!=1: ref = F.interpolate(input, scale_factor=self.scale[i], mode='bicubic') #feature transformation function f base = self.conv_assembly(ref) shape_input = base.shape #sampling raw_w_i = extract_image_patches(base, ksizes=[kernel, kernel], strides=[self.stride,self.stride], rates=[1, 1], padding='same') # [N, C*k*k, L] raw_w_i = raw_w_i.view(shape_input[0], shape_input[1], kernel, kernel, -1) raw_w_i = raw_w_i.permute(0, 4, 1, 2, 3) # raw_shape: [N, L, C, k, k] raw_w_i_groups = torch.split(raw_w_i, 1, dim=0) raw_w.append(raw_w_i_groups) #feature transformation function g ref_i = self.conv_match(ref) 
shape_ref = ref_i.shape #sampling w_i = extract_image_patches(ref_i, ksizes=[self.ksize, self.ksize], strides=[self.stride, self.stride], rates=[1, 1], padding='same') w_i = w_i.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1) w_i = w_i.permute(0, 4, 1, 2, 3) # w shape: [N, L, C, k, k] w_i_groups = torch.split(w_i, 1, dim=0) w.append(w_i_groups) y = [] for idx, xi in enumerate(input_groups): #group in a filter wi = torch.cat([w[i][idx][0] for i in range(len(self.scale))],dim=0) # [L, C, k, k] #normalize max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2), axis=[1, 2, 3], keepdim=True)), self.escape_NaN) wi_normed = wi/ max_wi #matching xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1]) # xi: 1*c*H*W yi = F.conv2d(xi, wi_normed, stride=1) # [1, L, H, W] L = shape_ref[2]*shape_ref[3] yi = yi.view(1,wi.shape[0], shape_base[2], shape_base[3]) # (B=1, C=32*32, H=32, W=32) # softmax matching score yi = F.softmax(yi*self.softmax_scale, dim=1) if self.average == False: yi = (yi == yi.max(dim=1,keepdim=True)[0]).float() # deconv for patch pasting raw_wi = torch.cat([raw_w[i][idx][0] for i in range(len(self.scale))],dim=0) yi = F.conv_transpose2d(yi, raw_wi, stride=self.stride,padding=1)/4. y.append(yi) y = torch.cat(y, dim=0)+res*self.res_scale # back to the mini-batch return y
4,427
46.106383
147
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/vdsr.py
from model import common import torch.nn as nn import torch.nn.init as init url = { 'r20f64': '' } def make_model(args, parent=False): return VDSR(args) class VDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(VDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.url = url['r{}f{}'.format(n_resblocks, n_feats)] self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) def basic_block(in_channels, out_channels, act): return common.BasicBlock( conv, in_channels, out_channels, kernel_size, bias=True, bn=False, act=act ) # define body module m_body = [] m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True))) for _ in range(n_resblocks - 2): m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True))) m_body.append(basic_block(n_feats, args.n_colors, None)) self.body = nn.Sequential(*m_body) def forward(self, x): x = self.sub_mean(x) res = self.body(x) res += x x = self.add_mean(res) return x
1,275
26.148936
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/utils/tools.py
import os import torch import numpy as np from PIL import Image import torch.nn.functional as F def normalize(x): return x.mul_(2).add_(-1) def same_padding(images, ksizes, strides, rates): assert len(images.size()) == 4 batch_size, channel, rows, cols = images.size() out_rows = (rows + strides[0] - 1) // strides[0] out_cols = (cols + strides[1] - 1) // strides[1] effective_k_row = (ksizes[0] - 1) * rates[0] + 1 effective_k_col = (ksizes[1] - 1) * rates[1] + 1 padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows) padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols) # Pad the input padding_top = int(padding_rows / 2.) padding_left = int(padding_cols / 2.) padding_bottom = padding_rows - padding_top padding_right = padding_cols - padding_left paddings = (padding_left, padding_right, padding_top, padding_bottom) images = torch.nn.ZeroPad2d(paddings)(images) return images def extract_image_patches(images, ksizes, strides, rates, padding='same'): """ Extract patches from images and put them in the C output dimension. :param padding: :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape :param ksizes: [ksize_rows, ksize_cols]. 
The size of the sliding window for each dimension of images :param strides: [stride_rows, stride_cols] :param rates: [dilation_rows, dilation_cols] :return: A Tensor """ assert len(images.size()) == 4 assert padding in ['same', 'valid'] batch_size, channel, height, width = images.size() if padding == 'same': images = same_padding(images, ksizes, strides, rates) elif padding == 'valid': pass else: raise NotImplementedError('Unsupported padding type: {}.\ Only "same" or "valid" are supported.'.format(padding)) unfold = torch.nn.Unfold(kernel_size=ksizes, dilation=rates, padding=0, stride=strides) patches = unfold(images) return patches # [N, C*k*k, L], L is the total number of such blocks def reduce_mean(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.mean(x, dim=i, keepdim=keepdim) return x def reduce_std(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.std(x, dim=i, keepdim=keepdim) return x def reduce_sum(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.sum(x, dim=i, keepdim=keepdim) return x
2,777
32.878049
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/DN_RGB/code/model/utils/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/main.py
import torch import utility import data import model import loss from option import args from trainer import Trainer torch.manual_seed(args.seed) checkpoint = utility.checkpoint(args) def main(): global model if args.data_test == ['video']: from videotester import VideoTester model = model.Model(args,checkpoint) print('total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0)) t = VideoTester(args, model, checkpoint) t.test() else: if checkpoint.ok: loader = data.Data(args) _model = model.Model(args, checkpoint) print('total params:%.2fM' % (sum(p.numel() for p in _model.parameters())/1000000.0)) _loss = loss.Loss(args, checkpoint) if not args.test_only else None t = Trainer(args, loader, _model, _loss, checkpoint) while not t.terminate(): t.train() t.test() checkpoint.done() if __name__ == '__main__': main()
1,026
27.527778
97
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/utility.py
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import numpy as np
import imageio

import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs


class timer():
    """Wall-clock stopwatch with a hold/release accumulator."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Start (or restart) timing."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds elapsed since the last tic; optionally restart."""
        diff = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return diff

    def hold(self):
        """Accumulate the elapsed time since the last tic."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and reset the accumulator."""
        ret = self.acc
        self.acc = 0
        return ret

    def reset(self):
        self.acc = 0


class checkpoint():
    """Experiment bookkeeping: logs, PSNR history, config dump, image saving."""

    def __init__(self, args):
        self.args = args
        self.ok = True
        self.log = torch.Tensor()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

        if not args.load:
            if not args.save:
                args.save = now
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                # Resume: reload the PSNR log so epoch counting continues.
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''

        if args.reset:
            os.system('rm -rf ' + self.dir)
            args.load = ''

        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)

        open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')

        # Number of background image-writer processes.
        self.n_processes = 8

    def get_path(self, *subdir):
        return os.path.join(self.dir, *subdir)

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss state, optimizer state and plots for *epoch*."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)

        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            # Reopen so the file is flushed to disk immediately.
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        self.log_file.close()

    def plot_psnr(self, epoch):
        """Save one PSNR-vs-epoch PDF per test dataset."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        """Spawn worker processes that write result images off the main thread."""
        self.queue = Queue()

        def bg_target(queue):
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None:
                        break  # (None, None) is the shutdown sentinel
                    imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]

        for p in self.process:
            p.start()

    def end_background(self):
        """Signal workers to stop, drain the queue, and join them."""
        for _ in range(self.n_processes):
            self.queue.put((None, None))
        while not self.queue.empty():
            time.sleep(1)
        for p in self.process:
            p.join()

    def save_results(self, dataset, filename, save_list, scale):
        """Queue restored/LQ/HQ images for asynchronous saving."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )

            postfix = ('CAR', 'LQ', 'HQ')
            for v, p in zip(save_list, postfix):
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))


def quantize(img, rgb_range):
    """Clamp and round *img* to valid 8-bit pixel values in *rgb_range*."""
    pixel_range = 255 / rgb_range
    return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)


def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """PSNR between *sr* and *hr*; benchmark sets are converted to luma."""
    if hr.nelement() == 1:
        return 0

    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        # NOTE(review): `> 5` means the luma conversion never fires for RGB
        # (3-channel) input; the CAR task is grayscale, so kept as-is.
        if diff.size(1) > 5:
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    else:
        shave = scale + 6

    # NOTE(review): `shave` is computed but not applied here — the full image
    # is evaluated (deliberate CAR modification of the EDSR original?).
    valid = diff[..., :, :]
    mse = valid.pow(2).mean()

    return -10 * math.log10(mse)


def make_optimizer(args, target):
    '''
    make optimizer and scheduler together
    '''
    # optimizer: only parameters that require gradients are optimized
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}

    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['epsilon'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon

    # scheduler: step decay at the epochs listed in args.decay
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    class CustomOptimizer(optimizer_class):
        """Optimizer with an attached scheduler and save/load helpers."""

        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                # Replay scheduler steps so the LR matches the resume epoch.
                for _ in range(epoch):
                    self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            return self.scheduler.get_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
7,459
30.344538
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/dataloader.py
import sys  # fix: sys.exc_info() is used in the worker error path below
import threading
import random

import torch
import torch.multiprocessing as multiprocessing

from torch.utils.data import DataLoader
from torch.utils.data import SequentialSampler
from torch.utils.data import RandomSampler
from torch.utils.data import BatchSampler
from torch.utils.data import _utils
from torch.utils.data.dataloader import _DataLoaderIter

from torch.utils.data._utils import collate
from torch.utils.data._utils import signal_handling
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data._utils import ExceptionWrapper
from torch.utils.data._utils import IS_WINDOWS
from torch.utils.data._utils.worker import ManagerWatchdog

from torch._six import queue


def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn,
             scale, seed, init_fn, worker_id):
    """Worker loop that additionally picks a random scale per batch
    (multi-scale training) before collating the samples."""
    try:
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()

        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)

        data_queue.cancel_join_thread()

        if init_fn is not None:
            init_fn(worker_id)

        watchdog = ManagerWatchdog()

        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue

            if r is None:
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutdown in progress: drain the queue without working.
                continue

            idx, batch_indices = r
            try:
                idx_scale = 0
                if len(scale) > 1 and dataset.train:
                    # One random scale per batch for multi-scale models.
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)

                samples = collate_fn([dataset[i] for i in batch_indices])
                samples.append(idx_scale)
            except Exception:
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples

    except KeyboardInterrupt:
        pass


class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator that spawns `_ms_loop` workers instead of the default ones."""

    def __init__(self, loader):
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout

        self.sample_iter = iter(self.batch_sampler)

        base_seed = torch.LongTensor(1).random_().item()

        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()

            # NOTE(review): re-seeds, overriding the base_seed drawn above.
            base_seed = torch.LongTensor(1).random_()[0]

            self.index_queues = []
            self.workers = []
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)

            if self.pin_memory:
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue

            _utils.signal_handling._set_worker_pids(
                id(self), tuple(w.pid for w in self.workers)
            )
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True

            # Prime the pipeline with 2 batches per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()


class MSDataLoader(DataLoader):
    """DataLoader that yields batches with a per-batch scale index."""

    def __init__(self, cfg, *args, **kwargs):
        super(MSDataLoader, self).__init__(
            *args, **kwargs, num_workers=cfg.n_threads
        )
        self.scale = cfg.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
5,259
32.081761
104
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/template.py
def set_template(args): # Set the templates here if args.template.find('jpeg') >= 0: args.data_train = 'DIV2K_jpeg' args.data_test = 'DIV2K_jpeg' args.epochs = 200 args.decay = '100' if args.template.find('EDSR_paper') >= 0: args.model = 'EDSR' args.n_resblocks = 32 args.n_feats = 256 args.res_scale = 0.1 if args.template.find('MDSR') >= 0: args.model = 'MDSR' args.patch_size = 48 args.epochs = 650 if args.template.find('DDBPN') >= 0: args.model = 'DDBPN' args.patch_size = 128 args.scale = '4' args.data_test = 'Set5' args.batch_size = 20 args.epochs = 1000 args.decay = '500' args.gamma = 0.1 args.weight_decay = 1e-4 args.loss = '1*MSE' if args.template.find('GAN') >= 0: args.epochs = 200 args.lr = 5e-5 args.decay = '150' if args.template.find('RCAN') >= 0: args.model = 'RCAN' args.n_resgroups = 10 args.n_resblocks = 20 args.n_feats = 64 args.chop = True if args.template.find('VDSR') >= 0: args.model = 'VDSR' args.n_resblocks = 20 args.n_feats = 64 args.patch_size = 41 args.lr = 1e-1
1,312
23.314815
45
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/option.py
import argparse
import template

parser = argparse.ArgumentParser(description='EDSR and MDSR')

parser.add_argument('--debug', action='store_true',
                    help='Enables debug mode')
parser.add_argument('--template', default='.',
                    help='You can set various templates in option.py')

# Hardware specifications
parser.add_argument('--n_threads', type=int, default=18,
                    help='number of threads for data loading')
parser.add_argument('--cpu', action='store_true',
                    help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
                    help='number of GPUs')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed')

# Data specifications
parser.add_argument('--dir_data', type=str, default='../../../',
                    help='dataset directory')
parser.add_argument('--data_train', type=str, default='DIV2K',
                    help='train dataset name')
parser.add_argument('--data_test', type=str, default='DIV2K',
                    help='test dataset name')
parser.add_argument('--data_range', type=str, default='1-800/801-805',
                    help='train/test data range')
parser.add_argument('--ext', type=str, default='sep',
                    help='dataset file extension')
parser.add_argument('--scale', type=str, default='4',
                    help='super resolution scale')
parser.add_argument('--patch_size', type=int, default=192,
                    help='output patch size')
parser.add_argument('--rgb_range', type=int, default=1,
                    help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=1,
                    help='number of color channels to use')
parser.add_argument('--chop', action='store_true',
                    help='enable memory-efficient forward')
parser.add_argument('--no_augment', action='store_true',
                    help='do not use data augmentation')

# Model specifications
parser.add_argument('--model', default='EDSR',
                    help='model name')
parser.add_argument('--act', type=str, default='relu',
                    help='activation function')
parser.add_argument('--pre_train', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--extend', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--n_resblocks', type=int, default=16,
                    help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
                    help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=1,
                    help='residual scaling')
parser.add_argument('--shift_mean', default=True,
                    help='subtract pixel mean from the input')
parser.add_argument('--dilation', action='store_true',
                    help='use dilated convolution')
parser.add_argument('--precision', type=str, default='single',
                    choices=('single', 'half'),
                    help='FP precision for test (single | half)')

# Option for Residual dense network (RDN)
parser.add_argument('--G0', type=int, default=64,
                    help='default number of filters. (Use in RDN)')
parser.add_argument('--RDNkSize', type=int, default=3,
                    help='default kernel size. (Use in RDN)')
parser.add_argument('--RDNconfig', type=str, default='B',
                    help='parameters config of RDN. (Use in RDN)')
parser.add_argument('--depth', type=int, default=12,
                    help='number of residual groups')

# Option for Residual channel attention network (RCAN)
parser.add_argument('--n_resgroups', type=int, default=10,
                    help='number of residual groups')
parser.add_argument('--reduction', type=int, default=16,
                    help='number of feature maps reduction')

# Training specifications
parser.add_argument('--reset', action='store_true',
                    help='reset the training')
parser.add_argument('--test_every', type=int, default=1000,
                    help='do test per every N batches')
parser.add_argument('--epochs', type=int, default=1000,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='input batch size for training')
parser.add_argument('--split_batch', type=int, default=1,
                    help='split the batch into smaller chunks')
parser.add_argument('--self_ensemble', action='store_true',
                    help='use self-ensemble method for test')
parser.add_argument('--test_only', action='store_true',
                    help='set this option to test the model')
parser.add_argument('--gan_k', type=int, default=1,
                    help='k value for adversarial loss')

# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
                    help='learning rate')
parser.add_argument('--decay', type=str, default='200-400-600-800',
                    help='learning rate decay type')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='learning rate decay factor for step decay')
parser.add_argument('--optimizer', default='ADAM',
                    choices=('SGD', 'ADAM', 'RMSprop'),
                    help='optimizer to use (SGD | ADAM | RMSprop)')
parser.add_argument('--momentum', type=float, default=0.9,
                    help='SGD momentum')
# NOTE(review): type=tuple does NOT parse a command-line pair; only the
# default works. Passing --betas on the CLI would produce a tuple of chars.
parser.add_argument('--betas', type=tuple, default=(0.9, 0.999),
                    help='ADAM beta')
parser.add_argument('--epsilon', type=float, default=1e-8,
                    help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
                    help='weight decay')
parser.add_argument('--gclip', type=float, default=0,
                    help='gradient clipping threshold (0 = no clipping)')

# Loss specifications
parser.add_argument('--loss', type=str, default='1*L1',
                    help='loss function configuration')
# argparse applies `type` to string defaults, so '1e8' becomes the float 1e8.
parser.add_argument('--skip_threshold', type=float, default='1e8',
                    help='skipping batch that has large error')

# Log specifications
parser.add_argument('--save', type=str, default='test',
                    help='file name to save')
parser.add_argument('--load', type=str, default='',
                    help='file name to load')
parser.add_argument('--resume', type=int, default=0,
                    help='resume from specific checkpoint')
parser.add_argument('--save_models', action='store_true',
                    help='save all intermediate models')
parser.add_argument('--print_every', type=int, default=100,
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_results', action='store_true',
                    help='save output results')
parser.add_argument('--save_gt', action='store_true',
                    help='save low-resolution and high-resolution images together')

args = parser.parse_args()
template.set_template(args)

# '2+3+4' style multi-scale / multi-dataset options become lists.
args.scale = list(map(lambda x: int(x), args.scale.split('+')))
args.data_train = args.data_train.split('+')
args.data_test = args.data_test.split('+')

if args.epochs == 0:
    args.epochs = 1e8

# Normalize string 'True'/'False' values (e.g. from templates) to booleans.
for arg in vars(args):
    if vars(args)[arg] == 'True':
        vars(args)[arg] = True
    elif vars(args)[arg] == 'False':
        vars(args)[arg] = False
7,467
45.385093
89
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/videotester.py
import os import math import utility from data import common import torch import cv2 from tqdm import tqdm class VideoTester(): def __init__(self, args, my_model, ckp): self.args = args self.scale = args.scale self.ckp = ckp self.model = my_model self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo)) def test(self): torch.set_grad_enabled(False) self.ckp.write_log('\nEvaluation on video:') self.model.eval() timer_test = utility.timer() for idx_scale, scale in enumerate(self.scale): vidcap = cv2.VideoCapture(self.args.dir_demo) total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) vidwri = cv2.VideoWriter( self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)), cv2.VideoWriter_fourcc(*'XVID'), vidcap.get(cv2.CAP_PROP_FPS), ( int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)) ) ) tqdm_test = tqdm(range(total_frames), ncols=80) for _ in tqdm_test: success, lr = vidcap.read() if not success: break lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) lr, = self.prepare(lr.unsqueeze(0)) sr = self.model(lr, idx_scale) sr = utility.quantize(sr, self.args.rgb_range).squeeze(0) normalized = sr * 255 / self.args.rgb_range ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy() vidwri.write(ndarr) vidcap.release() vidwri.release() self.ckp.write_log( 'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True ) torch.set_grad_enabled(True) def prepare(self, *args): device = torch.device('cpu' if self.args.cpu else 'cuda') def _prepare(tensor): if self.args.precision == 'half': tensor = tensor.half() return tensor.to(device) return [_prepare(a) for a in args]
2,280
30.246575
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/trainer.py
import os
import math
from decimal import Decimal

import utility

import torch
import torch.nn.utils as utils
from tqdm import tqdm


class Trainer():
    """Epoch-based training/evaluation driver around model, loss and loader."""

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)

        if self.args.load != '':
            # Resuming: restore optimizer/scheduler state up to the last epoch.
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8

    def train(self):
        """Run one training epoch over loader_train."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP
        self.loader_train.dataset.set_scale(0)
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            if self.args.gclip > 0:
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set / scale, log PSNR, save best model."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results:
            self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the compute device, halving precision if asked."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')

        def _prepare(tensor):
            if self.args.precision == 'half':
                tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when training should stop (or after a test-only run)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
4,820
31.795918
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/loss/adversarial.py
import utility
from types import SimpleNamespace

from model import common
from loss import discriminator

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class Adversarial(nn.Module):
    """Adversarial loss: owns the discriminator and its optimizer, updates
    the discriminator gan_k times per call, then returns the generator loss."""

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # see https://arxiv.org/pdf/1704.00028.pdf pp.4
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma
            }
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args

        self.optimizer = utility.make_optimizer(optim_args, self.dis)

    def forward(self, fake, real):
        # updating discriminator...
        self.loss = 0
        fake_detach = fake.detach()     # do not backpropagate through G
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            # d: B x 1 tensor
            d_fake = self.dis(fake_detach)
            d_real = self.dis(real)
            retain_graph = False
            if self.gan_type == 'GAN':
                loss_d = self.bce(d_real, d_fake)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    epsilon = torch.rand_like(fake).view(-1, 1, 1, 1)
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.dis(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty
            # from ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks
            elif self.gan_type == 'RGAN':
                better_real = d_real - d_fake.mean(dim=0, keepdim=True)
                better_fake = d_fake - d_real.mean(dim=0, keepdim=True)
                loss_d = self.bce(better_real, better_fake)
                retain_graph = True

            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward(retain_graph=retain_graph)
            self.optimizer.step()

            if self.gan_type == 'WGAN':
                # Vanilla WGAN enforces the Lipschitz constraint by clipping.
                for p in self.dis.parameters():
                    p.data.clamp_(-1, 1)

        self.loss /= self.gan_k

        # updating generator...
        d_fake_bp = self.dis(fake)      # for backpropagation, use fake as it is
        if self.gan_type == 'GAN':
            label_real = torch.ones_like(d_fake_bp)
            loss_g = F.binary_cross_entropy_with_logits(d_fake_bp, label_real)
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_bp.mean()
        elif self.gan_type == 'RGAN':
            # NOTE(review): d_real here is the value from the *last*
            # discriminator iteration above; kept as in the original.
            better_real = d_real - d_fake_bp.mean(dim=0, keepdim=True)
            better_fake = d_fake_bp - d_real.mean(dim=0, keepdim=True)
            loss_g = self.bce(better_fake, better_real)

        # Generator loss
        return loss_g

    def state_dict(self, *args, **kwargs):
        state_discriminator = self.dis.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()

        return dict(**state_discriminator, **state_optimizer)

    def bce(self, real, fake):
        """Standard GAN BCE loss: real → 1, fake → 0 (on logits)."""
        label_real = torch.ones_like(real)
        label_fake = torch.zeros_like(fake)
        bce_real = F.binary_cross_entropy_with_logits(real, label_real)
        bce_fake = F.binary_cross_entropy_with_logits(fake, label_fake)
        bce_loss = bce_real + bce_fake
        return bce_loss

# Some references
# https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
# OR
# https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
4,393
37.884956
84
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/loss/discriminator.py
from model import common import torch.nn as nn class Discriminator(nn.Module): ''' output is not normalized ''' def __init__(self, args): super(Discriminator, self).__init__() in_channels = args.n_colors out_channels = 64 depth = 7 def _block(_in_channels, _out_channels, stride=1): return nn.Sequential( nn.Conv2d( _in_channels, _out_channels, 3, padding=1, stride=stride, bias=False ), nn.BatchNorm2d(_out_channels), nn.LeakyReLU(negative_slope=0.2, inplace=True) ) m_features = [_block(in_channels, out_channels)] for i in range(depth): in_channels = out_channels if i % 2 == 1: stride = 1 out_channels *= 2 else: stride = 2 m_features.append(_block(in_channels, out_channels, stride=stride)) patch_size = args.patch_size // (2**((depth + 1) // 2)) m_classifier = [ nn.Linear(out_channels * patch_size**2, 1024), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Linear(1024, 1) ] self.features = nn.Sequential(*m_features) self.classifier = nn.Sequential(*m_classifier) def forward(self, x): features = self.features(x) output = self.classifier(features.view(features.size(0), -1)) return output
1,595
27.5
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/loss/vgg.py
from model import common import torch import torch.nn as nn import torch.nn.functional as F import torchvision.models as models class VGG(nn.Module): def __init__(self, conv_index, rgb_range=1): super(VGG, self).__init__() vgg_features = models.vgg19(pretrained=True).features modules = [m for m in vgg_features] if conv_index.find('22') >= 0: self.vgg = nn.Sequential(*modules[:8]) elif conv_index.find('54') >= 0: self.vgg = nn.Sequential(*modules[:35]) vgg_mean = (0.485, 0.456, 0.406) vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range) self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std) for p in self.parameters(): p.requires_grad = False def forward(self, sr, hr): def _forward(x): x = self.sub_mean(x) x = self.vgg(x) return x vgg_sr = _forward(sr) with torch.no_grad(): vgg_hr = _forward(hr.detach()) loss = F.mse_loss(vgg_sr, vgg_hr) return loss
1,106
28.918919
75
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/loss/__init__.py
import os from importlib import import_module import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import torch import torch.nn as nn import torch.nn.functional as F class Loss(nn.modules.loss._Loss): def __init__(self, args, ckp): super(Loss, self).__init__() print('Preparing loss function:') self.n_GPUs = args.n_GPUs self.loss = [] self.loss_module = nn.ModuleList() for loss in args.loss.split('+'): weight, loss_type = loss.split('*') if loss_type == 'MSE': loss_function = nn.MSELoss() elif loss_type == 'L1': loss_function = nn.L1Loss() elif loss_type.find('VGG') >= 0: module = import_module('loss.vgg') loss_function = getattr(module, 'VGG')( loss_type[3:], rgb_range=args.rgb_range ) elif loss_type.find('GAN') >= 0: module = import_module('loss.adversarial') loss_function = getattr(module, 'Adversarial')( args, loss_type ) self.loss.append({ 'type': loss_type, 'weight': float(weight), 'function': loss_function} ) if loss_type.find('GAN') >= 0: self.loss.append({'type': 'DIS', 'weight': 1, 'function': None}) if len(self.loss) > 1: self.loss.append({'type': 'Total', 'weight': 0, 'function': None}) for l in self.loss: if l['function'] is not None: print('{:.3f} * {}'.format(l['weight'], l['type'])) self.loss_module.append(l['function']) self.log = torch.Tensor() device = torch.device('cpu' if args.cpu else 'cuda') self.loss_module.to(device) if args.precision == 'half': self.loss_module.half() if not args.cpu and args.n_GPUs > 1: self.loss_module = nn.DataParallel( self.loss_module, range(args.n_GPUs) ) if args.load != '': self.load(ckp.dir, cpu=args.cpu) def forward(self, sr, hr): losses = [] for i, l in enumerate(self.loss): if l['function'] is not None: loss = l['function'](sr, hr) effective_loss = l['weight'] * loss losses.append(effective_loss) self.log[-1, i] += effective_loss.item() elif l['type'] == 'DIS': self.log[-1, i] += self.loss[i - 1]['function'].loss loss_sum = sum(losses) if len(self.loss) > 1: self.log[-1, -1] 
+= loss_sum.item() return loss_sum def step(self): for l in self.get_loss_module(): if hasattr(l, 'scheduler'): l.scheduler.step() def start_log(self): self.log = torch.cat((self.log, torch.zeros(1, len(self.loss)))) def end_log(self, n_batches): self.log[-1].div_(n_batches) def display_loss(self, batch): n_samples = batch + 1 log = [] for l, c in zip(self.loss, self.log[-1]): log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples)) return ''.join(log) def plot_loss(self, apath, epoch): axis = np.linspace(1, epoch, epoch) for i, l in enumerate(self.loss): label = '{} Loss'.format(l['type']) fig = plt.figure() plt.title(label) plt.plot(axis, self.log[:, i].numpy(), label=label) plt.legend() plt.xlabel('Epochs') plt.ylabel('Loss') plt.grid(True) plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type']))) plt.close(fig) def get_loss_module(self): if self.n_GPUs == 1: return self.loss_module else: return self.loss_module.module def save(self, apath): torch.save(self.state_dict(), os.path.join(apath, 'loss.pt')) torch.save(self.log, os.path.join(apath, 'loss_log.pt')) def load(self, apath, cpu=False): if cpu: kwargs = {'map_location': lambda storage, loc: storage} else: kwargs = {} self.load_state_dict(torch.load( os.path.join(apath, 'loss.pt'), **kwargs )) self.log = torch.load(os.path.join(apath, 'loss_log.pt')) for l in self.get_loss_module(): if hasattr(l, 'scheduler'): for _ in range(len(self.log)): l.scheduler.step()
4,659
31.361111
80
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/utils/tools.py
import os import torch import numpy as np from PIL import Image import torch.nn.functional as F def normalize(x): return x.mul_(2).add_(-1) def same_padding(images, ksizes, strides, rates): assert len(images.size()) == 4 batch_size, channel, rows, cols = images.size() out_rows = (rows + strides[0] - 1) // strides[0] out_cols = (cols + strides[1] - 1) // strides[1] effective_k_row = (ksizes[0] - 1) * rates[0] + 1 effective_k_col = (ksizes[1] - 1) * rates[1] + 1 padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows) padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols) # Pad the input padding_top = int(padding_rows / 2.) padding_left = int(padding_cols / 2.) padding_bottom = padding_rows - padding_top padding_right = padding_cols - padding_left paddings = (padding_left, padding_right, padding_top, padding_bottom) images = torch.nn.ZeroPad2d(paddings)(images) return images def extract_image_patches(images, ksizes, strides, rates, padding='same'): """ Extract patches from images and put them in the C output dimension. :param padding: :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape :param ksizes: [ksize_rows, ksize_cols]. 
The size of the sliding window for each dimension of images :param strides: [stride_rows, stride_cols] :param rates: [dilation_rows, dilation_cols] :return: A Tensor """ assert len(images.size()) == 4 assert padding in ['same', 'valid'] batch_size, channel, height, width = images.size() if padding == 'same': images = same_padding(images, ksizes, strides, rates) elif padding == 'valid': pass else: raise NotImplementedError('Unsupported padding type: {}.\ Only "same" or "valid" are supported.'.format(padding)) unfold = torch.nn.Unfold(kernel_size=ksizes, dilation=rates, padding=0, stride=strides) patches = unfold(images) return patches # [N, C*k*k, L], L is the total number of such blocks def reduce_mean(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.mean(x, dim=i, keepdim=keepdim) return x def reduce_std(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.std(x, dim=i, keepdim=keepdim) return x def reduce_sum(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.sum(x, dim=i, keepdim=keepdim) return x
2,777
32.878049
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/utils/__init__.py
0
0
0
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/div2kjpeg.py
import os from data import srdata from data import div2k class DIV2KJPEG(div2k.DIV2K): def __init__(self, args, name='', train=True, benchmark=False): self.q_factor = int(name.replace('DIV2K-Q', '')) super(DIV2KJPEG, self).__init__( args, name=name, train=train, benchmark=benchmark ) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, 'DIV2K') self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join( self.apath, 'DIV2K_Q{}'.format(self.q_factor) ) if self.input_large: self.dir_lr += 'L' self.ext = ('.png', '.jpg')
675
31.190476
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/sr291.py
from data import srdata class SR291(srdata.SRData): def __init__(self, args, name='SR291', train=True, benchmark=False): super(SR291, self).__init__(args, name=name)
180
24.857143
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/benchmark.py
import os from data import common from data import srdata import numpy as np import torch import torch.utils.data as data class Benchmark(srdata.SRData): def __init__(self, args, name='', train=True, benchmark=True): super(Benchmark, self).__init__( args, name=name, train=train, benchmark=True ) def _set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, 'benchmark', self.name) self.dir_hr = os.path.join(self.apath, 'HR') if self.input_large: self.dir_lr = os.path.join(self.apath, 'LR_bicubicL') else: self.dir_lr = os.path.join(self.apath, 'LR_bicubic') self.ext = ('','.jpg')
702
26.038462
67
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/video.py
import os from data import common import cv2 import numpy as np import imageio import torch import torch.utils.data as data class Video(data.Dataset): def __init__(self, args, name='Video', train=False, benchmark=False): self.args = args self.name = name self.scale = args.scale self.idx_scale = 0 self.train = False self.do_eval = False self.benchmark = benchmark self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo)) self.vidcap = cv2.VideoCapture(args.dir_demo) self.n_frames = 0 self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) def __getitem__(self, idx): success, lr = self.vidcap.read() if success: self.n_frames += 1 lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames) else: vidcap.release() return None def __len__(self): return self.total_frames def set_scale(self, idx_scale): self.idx_scale = idx_scale
1,207
25.844444
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/srdata.py
import os import glob import random import pickle from data import common import numpy as np import imageio import torch import torch.utils.data as data class SRData(data.Dataset): def __init__(self, args, name='', train=True, benchmark=False): self.args = args self.name = name self.train = train self.split = 'train' if train else 'test' self.do_eval = True self.benchmark = benchmark self.input_large = (args.model == 'VDSR') self.scale = args.scale self.idx_scale = 0 self._set_filesystem(args.dir_data) if args.ext.find('img') < 0: path_bin = os.path.join(self.apath, 'bin') os.makedirs(path_bin, exist_ok=True) list_hr, list_lr = self._scan() if args.ext.find('img') >= 0 or benchmark: self.images_hr, self.images_lr = list_hr, list_lr elif args.ext.find('sep') >= 0: os.makedirs( self.dir_hr.replace(self.apath, path_bin), exist_ok=True ) for s in self.scale: os.makedirs( os.path.join( self.dir_lr.replace(self.apath, path_bin), 'X{}'.format(s) ), exist_ok=True ) self.images_hr, self.images_lr = [], [[] for _ in self.scale] for h in list_hr: b = h.replace(self.apath, path_bin) b = b.replace(self.ext[0], '.pt') self.images_hr.append(b) self._check_and_load(args.ext, h, b, verbose=True) for i, ll in enumerate(list_lr): for l in ll: b = l.replace(self.apath, path_bin) b = b.replace(self.ext[1], '.pt') self.images_lr[i].append(b) self._check_and_load(args.ext, l, b, verbose=True) if train: n_patches = args.batch_size * args.test_every n_images = len(args.data_train) * len(self.images_hr) if n_images == 0: self.repeat = 0 else: self.repeat = max(n_patches // n_images, 1) # Below functions as used to prepare images def _scan(self): names_hr = sorted( glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0])) ) names_lr = [[] for _ in self.scale] for f in names_hr: filename, _ = os.path.splitext(os.path.basename(f)) for si, s in enumerate(self.scale): names_lr[si].append(os.path.join( self.dir_lr, 'X{}/{}{}'.format( s, filename, self.ext[1] ) )) return names_hr, names_lr def 
_set_filesystem(self, dir_data): self.apath = os.path.join(dir_data, self.name) self.dir_hr = os.path.join(self.apath, 'HR') self.dir_lr = os.path.join(self.apath, 'LR_bicubic') if self.input_large: self.dir_lr += 'L' self.ext = ('.png', '.jpg') def _check_and_load(self, ext, img, f, verbose=True): if not os.path.isfile(f) or ext.find('reset') >= 0: if verbose: print('Making a binary: {}'.format(f)) with open(f, 'wb') as _f: pickle.dump(imageio.imread(img), _f) def __getitem__(self, idx): lr, hr, filename = self._load_file(idx) pair = self.get_patch(lr, hr) pair = common.set_channel(*pair, n_channels=self.args.n_colors) pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range) return pair_t[0], pair_t[1], filename def __len__(self): if self.train: return len(self.images_hr) * self.repeat else: return len(self.images_hr) def _get_index(self, idx): if self.train: return idx % len(self.images_hr) else: return idx def _load_file(self, idx): idx = self._get_index(idx) f_hr = self.images_hr[idx] f_lr = self.images_lr[self.idx_scale][idx] filename, _ = os.path.splitext(os.path.basename(f_hr)) if self.args.ext == 'img' or self.benchmark: hr = imageio.imread(f_hr) lr = imageio.imread(f_lr) elif self.args.ext.find('sep') >= 0: with open(f_hr, 'rb') as _f: hr = pickle.load(_f) with open(f_lr, 'rb') as _f: lr = pickle.load(_f) return lr, hr, filename def get_patch(self, lr, hr): scale = self.scale[self.idx_scale] if self.train: lr, hr = common.get_patch( lr, hr, patch_size=self.args.patch_size, scale=scale, multi=(len(self.scale) > 1), input_large=self.input_large ) if not self.args.no_augment: lr, hr = common.augment(lr, hr) else: ih, iw = lr.shape[:2] hr = hr[0:ih * scale, 0:iw * scale] return lr, hr def set_scale(self, idx_scale): if not self.input_large: self.idx_scale = idx_scale else: self.idx_scale = random.randint(0, len(self.scale) - 1)
5,337
32.78481
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/demo.py
import os from data import common import numpy as np import imageio import torch import torch.utils.data as data class Demo(data.Dataset): def __init__(self, args, name='Demo', train=False, benchmark=False): self.args = args self.name = name self.scale = args.scale self.idx_scale = 0 self.train = False self.benchmark = benchmark self.filelist = [] for f in os.listdir(args.dir_demo): if f.find('.png') >= 0 or f.find('.jp') >= 0: self.filelist.append(os.path.join(args.dir_demo, f)) self.filelist.sort() def __getitem__(self, idx): filename = os.path.splitext(os.path.basename(self.filelist[idx]))[0] lr = imageio.imread(self.filelist[idx]) lr, = common.set_channel(lr, n_channels=self.args.n_colors) lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range) return lr_t, -1, filename def __len__(self): return len(self.filelist) def set_scale(self, idx_scale): self.idx_scale = idx_scale
1,075
25.9
76
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/common.py
import random import numpy as np import skimage.color as sc import torch def get_patch(*args, patch_size=96, scale=1, multi=False, input_large=False): ih, iw = args[0].shape[:2] print('heelo') print(args[0].shape) if not input_large: p = 1 if multi else 1 tp = p * patch_size ip = tp // 1 else: tp = patch_size ip = patch_size ix = random.randrange(0, iw - ip + 1) iy = random.randrange(0, ih - ip + 1) if not input_large: tx, ty = 1 * ix, 1 * iy else: tx, ty = ix, iy ret = [ args[0][iy:iy + ip, ix:ix + ip], *[a[ty:ty + tp, tx:tx + tp] for a in args[1:]] ] return ret def set_channel(*args, n_channels=3): def _set_channel(img): if img.ndim == 2: img = np.expand_dims(img, axis=2) c = img.shape[2] if n_channels == 1 and c == 3: img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2) elif n_channels == 3 and c == 1: img = np.concatenate([img] * n_channels, 2) return img return [_set_channel(a) for a in args] def np2Tensor(*args, rgb_range=255): def _np2Tensor(img): np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1))) tensor = torch.from_numpy(np_transpose).float() tensor.mul_(rgb_range / 255) return tensor return [_np2Tensor(a) for a in args] def augment(*args, hflip=True, rot=True): hflip = hflip and random.random() < 0.5 vflip = rot and random.random() < 0.5 rot90 = rot and random.random() < 0.5 def _augment(img): if hflip: img = img[:, ::-1] if vflip: img = img[::-1, :] if rot90: img = img.transpose(1, 0) return img return [_augment(a) for a in args]
1,799
23
77
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/__init__.py
from importlib import import_module #from dataloader import MSDataLoader from torch.utils.data import dataloader from torch.utils.data import ConcatDataset # This is a simple wrapper function for ConcatDataset class MyConcatDataset(ConcatDataset): def __init__(self, datasets): super(MyConcatDataset, self).__init__(datasets) self.train = datasets[0].train def set_scale(self, idx_scale): for d in self.datasets: if hasattr(d, 'set_scale'): d.set_scale(idx_scale) class Data: def __init__(self, args): self.loader_train = None if not args.test_only: datasets = [] for d in args.data_train: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) datasets.append(getattr(m, module_name)(args, name=d)) self.loader_train = dataloader.DataLoader( MyConcatDataset(datasets), batch_size=args.batch_size, shuffle=True, pin_memory=not args.cpu, num_workers=args.n_threads, ) self.loader_test = [] for d in args.data_test: if d in ['CBSD68','classic5','LIVE1','Kodak24','Set5', 'Set14', 'B100', 'Urban100']: m = import_module('data.benchmark') testset = getattr(m, 'Benchmark')(args, train=False, name=d) else: module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG' m = import_module('data.' + module_name.lower()) testset = getattr(m, module_name)(args, train=False, name=d) self.loader_test.append( dataloader.DataLoader( testset, batch_size=1, shuffle=False, pin_memory=not args.cpu, num_workers=args.n_threads, ) )
1,987
36.509434
96
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/data/div2k.py
import os from data import srdata class DIV2K(srdata.SRData): def __init__(self, args, name='DIV2K', train=True, benchmark=False): data_range = [r.split('-') for r in args.data_range.split('/')] if train: data_range = data_range[0] else: if args.test_only and len(data_range) == 1: data_range = data_range[0] else: data_range = data_range[1] self.begin, self.end = list(map(lambda x: int(x), data_range)) super(DIV2K, self).__init__( args, name=name, train=train, benchmark=benchmark ) def _scan(self): names_hr, names_lr = super(DIV2K, self)._scan() names_hr = names_hr[self.begin - 1:self.end] names_lr = [n[self.begin - 1:self.end] for n in names_lr] return names_hr, names_lr def _set_filesystem(self, dir_data): super(DIV2K, self)._set_filesystem(dir_data) self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR') self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic') if self.input_large: self.dir_lr += 'L'
1,134
33.393939
72
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/rcan.py
## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks ## https://arxiv.org/abs/1807.02758 from model import common import torch.nn as nn def make_model(args, parent=False): return RCAN(args) ## Channel Attention (CA) Layer class CALayer(nn.Module): def __init__(self, channel, reduction=16): super(CALayer, self).__init__() # global average pooling: feature --> point self.avg_pool = nn.AdaptiveAvgPool2d(1) # feature channel downscale and upscale --> channel weight self.conv_du = nn.Sequential( nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True), nn.ReLU(inplace=True), nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True), nn.Sigmoid() ) def forward(self, x): y = self.avg_pool(x) y = self.conv_du(y) return x * y ## Residual Channel Attention Block (RCAB) class RCAB(nn.Module): def __init__( self, conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(RCAB, self).__init__() modules_body = [] for i in range(2): modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias)) if bn: modules_body.append(nn.BatchNorm2d(n_feat)) if i == 0: modules_body.append(act) modules_body.append(CALayer(n_feat, reduction)) self.body = nn.Sequential(*modules_body) self.res_scale = res_scale def forward(self, x): res = self.body(x) #res = self.body(x).mul(self.res_scale) res += x return res ## Residual Group (RG) class ResidualGroup(nn.Module): def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks): super(ResidualGroup, self).__init__() modules_body = [] modules_body = [ RCAB( conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \ for _ in range(n_resblocks)] modules_body.append(conv(n_feat, n_feat, kernel_size)) self.body = nn.Sequential(*modules_body) def forward(self, x): res = self.body(x) res += x return res ## Residual Channel Attention Network (RCAN) class RCAN(nn.Module): def __init__(self, args, 
conv=common.default_conv): super(RCAN, self).__init__() n_resgroups = args.n_resgroups n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 reduction = args.reduction scale = args.scale[0] act = nn.ReLU(True) # RGB mean for DIV2K rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) # define head module modules_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module modules_body = [ ResidualGroup( conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \ for _ in range(n_resgroups)] modules_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module modules_tail = [ common.Upsampler(conv, scale, n_feats, act=False), conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*modules_head) self.body = nn.Sequential(*modules_body) self.tail = nn.Sequential(*modules_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=False): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') >= 0: print('Replace pre-trained upsampler to new one...') else: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name)) if strict: missing = set(own_state.keys()) - set(state_dict.keys()) if len(missing) > 0: raise KeyError('missing keys in state_dict: "{}"'.format(missing))
5,178
34.717241
116
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/ddbpn.py
# Deep Back-Projection Networks For Super-Resolution # https://arxiv.org/abs/1803.02735 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return DDBPN(args) def projection_conv(in_channels, out_channels, scale, up=True): kernel_size, stride, padding = { 2: (6, 2, 2), 4: (8, 4, 2), 8: (12, 8, 2) }[scale] if up: conv_f = nn.ConvTranspose2d else: conv_f = nn.Conv2d return conv_f( in_channels, out_channels, kernel_size, stride=stride, padding=padding ) class DenseProjection(nn.Module): def __init__(self, in_channels, nr, scale, up=True, bottleneck=True): super(DenseProjection, self).__init__() if bottleneck: self.bottleneck = nn.Sequential(*[ nn.Conv2d(in_channels, nr, 1), nn.PReLU(nr) ]) inter_channels = nr else: self.bottleneck = None inter_channels = in_channels self.conv_1 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) self.conv_2 = nn.Sequential(*[ projection_conv(nr, inter_channels, scale, not up), nn.PReLU(inter_channels) ]) self.conv_3 = nn.Sequential(*[ projection_conv(inter_channels, nr, scale, up), nn.PReLU(nr) ]) def forward(self, x): if self.bottleneck is not None: x = self.bottleneck(x) a_0 = self.conv_1(x) b_0 = self.conv_2(a_0) e = b_0.sub(x) a_1 = self.conv_3(e) out = a_0.add(a_1) return out class DDBPN(nn.Module): def __init__(self, args): super(DDBPN, self).__init__() scale = args.scale[0] n0 = 128 nr = 32 self.depth = 6 rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) initial = [ nn.Conv2d(args.n_colors, n0, 3, padding=1), nn.PReLU(n0), nn.Conv2d(n0, nr, 1), nn.PReLU(nr) ] self.initial = nn.Sequential(*initial) self.upmodules = nn.ModuleList() self.downmodules = nn.ModuleList() channels = nr for i in range(self.depth): self.upmodules.append( DenseProjection(channels, nr, scale, True, i > 1) ) if i != 0: channels += nr channels = nr for i in range(self.depth - 1): self.downmodules.append( 
DenseProjection(channels, nr, scale, False, i != 0) ) channels += nr reconstruction = [ nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1) ] self.reconstruction = nn.Sequential(*reconstruction) self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) def forward(self, x): x = self.sub_mean(x) x = self.initial(x) h_list = [] l_list = [] for i in range(self.depth - 1): if i == 0: l = x else: l = torch.cat(l_list, dim=1) h_list.append(self.upmodules[i](l)) l_list.append(self.downmodules[i](torch.cat(h_list, dim=1))) h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1))) out = self.reconstruction(torch.cat(h_list, dim=1)) out = self.add_mean(out) return out
3,629
26.5
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/rdn.py
# Residual Dense Network for Image Super-Resolution # https://arxiv.org/abs/1802.08797 from model import common import torch import torch.nn as nn def make_model(args, parent=False): return RDN(args) class RDB_Conv(nn.Module): def __init__(self, inChannels, growRate, kSize=3): super(RDB_Conv, self).__init__() Cin = inChannels G = growRate self.conv = nn.Sequential(*[ nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1), nn.ReLU() ]) def forward(self, x): out = self.conv(x) return torch.cat((x, out), 1) class RDB(nn.Module): def __init__(self, growRate0, growRate, nConvLayers, kSize=3): super(RDB, self).__init__() G0 = growRate0 G = growRate C = nConvLayers convs = [] for c in range(C): convs.append(RDB_Conv(G0 + c*G, G)) self.convs = nn.Sequential(*convs) # Local Feature Fusion self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1) def forward(self, x): return self.LFF(self.convs(x)) + x class RDN(nn.Module): def __init__(self, args): super(RDN, self).__init__() r = args.scale[0] G0 = args.G0 kSize = args.RDNkSize # number of RDB blocks, conv layers, out channels self.D, C, G = { 'A': (20, 6, 32), 'B': (16, 8, 64), }[args.RDNconfig] # Shallow feature extraction net self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1) self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) # Redidual dense blocks and dense feature fusion self.RDBs = nn.ModuleList() for i in range(self.D): self.RDBs.append( RDB(growRate0 = G0, growRate = G, nConvLayers = C) ) # Global Feature Fusion self.GFF = nn.Sequential(*[ nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1), nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1) ]) # Up-sampling net if r == 2 or r == 3: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(r), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) elif r == 4: self.UPNet = nn.Sequential(*[ nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, 
stride=1), nn.PixelShuffle(2), nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1), nn.PixelShuffle(2), nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1) ]) else: raise ValueError("scale must be 2 or 3 or 4.") def forward(self, x): f__1 = self.SFENet1(x) x = self.SFENet2(f__1) RDBs_out = [] for i in range(self.D): x = self.RDBs[i](x) RDBs_out.append(x) x = self.GFF(torch.cat(RDBs_out,1)) x += f__1 return self.UPNet(x)
3,202
29.216981
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/mdsr.py
from model import common import torch.nn as nn def make_model(args, parent=False): return MDSR(args) class MDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(MDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.scale_idx = 0 act = nn.ReLU(True) rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) m_head = [conv(args.n_colors, n_feats, kernel_size)] self.pre_process = nn.ModuleList([ nn.Sequential( common.ResBlock(conv, n_feats, 5, act=act), common.ResBlock(conv, n_feats, 5, act=act) ) for _ in args.scale ]) m_body = [ common.ResBlock( conv, n_feats, kernel_size, act=act ) for _ in range(n_resblocks) ] m_body.append(conv(n_feats, n_feats, kernel_size)) self.upsample = nn.ModuleList([ common.Upsampler( conv, s, n_feats, act=False ) for s in args.scale ]) m_tail = [conv(n_feats, args.n_colors, kernel_size)] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): x = self.sub_mean(x) x = self.head(x) x = self.pre_process[self.scale_idx](x) res = self.body(x) res += x x = self.upsample[self.scale_idx](res) x = self.tail(x) x = self.add_mean(x) return x def set_scale(self, scale_idx): self.scale_idx = scale_idx
1,837
25.637681
78
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/common.py
import math import torch import torch.nn as nn import torch.nn.functional as F def default_conv(in_channels, out_channels, kernel_size,stride=1, bias=True): return nn.Conv2d( in_channels, out_channels, kernel_size, padding=(kernel_size//2),stride=stride, bias=bias) class MeanShift(nn.Conv2d): def __init__( self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1): super(MeanShift, self).__init__(3, 3, kernel_size=1) std = torch.Tensor(rgb_std) self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1) self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std for p in self.parameters(): p.requires_grad = False class BasicBlock(nn.Sequential): def __init__( self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True, bn=False, act=nn.PReLU()): m = [conv(in_channels, out_channels, kernel_size, bias=bias)] if bn: m.append(nn.BatchNorm2d(out_channels)) if act is not None: m.append(act) super(BasicBlock, self).__init__(*m) class ResBlock(nn.Module): def __init__( self, conv, n_feats, kernel_size, bias=True, bn=False, act=nn.PReLU(), res_scale=1): super(ResBlock, self).__init__() m = [] for i in range(2): m.append(conv(n_feats, n_feats, kernel_size, bias=bias)) if bn: m.append(nn.BatchNorm2d(n_feats)) if i == 0: m.append(act) self.body = nn.Sequential(*m) self.res_scale = res_scale def forward(self, x): res = self.body(x).mul(self.res_scale) res += x return res class Upsampler(nn.Sequential): def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True): m = [] if (scale & (scale - 1)) == 0: # Is scale = 2^n? 
for _ in range(int(math.log(scale, 2))): m.append(conv(n_feats, 4 * n_feats, 3, bias)) m.append(nn.PixelShuffle(2)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) elif scale == 3: m.append(conv(n_feats, 9 * n_feats, 3, bias)) m.append(nn.PixelShuffle(3)) if bn: m.append(nn.BatchNorm2d(n_feats)) if act == 'relu': m.append(nn.ReLU(True)) elif act == 'prelu': m.append(nn.PReLU(n_feats)) else: raise NotImplementedError super(Upsampler, self).__init__(*m)
2,799
30.460674
80
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/__init__.py
import os from importlib import import_module import torch import torch.nn as nn from torch.autograd import Variable class Model(nn.Module): def __init__(self, args, ckp): super(Model, self).__init__() print('Making model...') self.scale = args.scale self.idx_scale = 0 self.self_ensemble = args.self_ensemble self.chop = args.chop self.precision = args.precision self.cpu = args.cpu self.device = torch.device('cpu' if args.cpu else 'cuda') self.n_GPUs = args.n_GPUs self.save_models = args.save_models module = import_module('model.' + args.model.lower()) self.model = module.make_model(args).to(self.device) if args.precision == 'half': self.model.half() if not args.cpu and args.n_GPUs > 1: self.model = nn.DataParallel(self.model, range(args.n_GPUs)) self.load( ckp.dir, pre_train=args.pre_train, resume=args.resume, cpu=args.cpu ) print(self.model, file=ckp.log_file) def forward(self, x, idx_scale): self.idx_scale = idx_scale target = self.get_model() if hasattr(target, 'set_scale'): target.set_scale(idx_scale) if self.self_ensemble and not self.training: if self.chop: forward_function = self.forward_chop else: forward_function = self.model.forward return self.forward_x8(x, forward_function) elif self.chop and not self.training: return self.forward_chop(x) else: return self.model(x) def get_model(self): if self.n_GPUs == 1: return self.model else: return self.model.module def state_dict(self, **kwargs): target = self.get_model() return target.state_dict(**kwargs) def save(self, apath, epoch, is_best=False): target = self.get_model() torch.save( target.state_dict(), os.path.join(apath, 'model_latest.pt') ) if is_best: torch.save( target.state_dict(), os.path.join(apath, 'model_best.pt') ) if self.save_models: torch.save( target.state_dict(), os.path.join(apath, 'model_{}.pt'.format(epoch)) ) def load(self, apath, pre_train='.', resume=-1, cpu=False): if cpu: kwargs = {'map_location': lambda storage, loc: storage} else: kwargs = {} if resume == -1: 
self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model_latest.pt'), **kwargs ), strict=False ) elif resume == 0: if pre_train != '.': print('Loading model from {}'.format(pre_train)) self.get_model().load_state_dict( torch.load(pre_train, **kwargs), strict=False ) else: self.get_model().load_state_dict( torch.load( os.path.join(apath, 'model', 'model_{}.pt'.format(resume)), **kwargs ), strict=False ) def forward_chop(self, x, shave=10, min_size=6400): scale = self.scale[self.idx_scale] scale =1 n_GPUs = min(self.n_GPUs, 4) b, c, h, w = x.size() h_half, w_half = h // 2, w // 2 h_size, w_size = h_half + shave, w_half + shave lr_list = [ x[:, :, 0:h_size, 0:w_size], x[:, :, 0:h_size, (w - w_size):w], x[:, :, (h - h_size):h, 0:w_size], x[:, :, (h - h_size):h, (w - w_size):w]] if w_size * h_size < min_size: sr_list = [] for i in range(0, 4, n_GPUs): lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0) sr_batch = self.model(lr_batch) sr_list.extend(sr_batch.chunk(n_GPUs, dim=0)) else: sr_list = [ self.forward_chop(patch, shave=shave, min_size=min_size) \ for patch in lr_list ] h, w = scale * h, scale * w h_half, w_half = scale * h_half, scale * w_half h_size, w_size = scale * h_size, scale * w_size shave *= scale output = x.new(b, c, h, w) output[:, :, 0:h_half, 0:w_half] \ = sr_list[0][:, :, 0:h_half, 0:w_half] output[:, :, 0:h_half, w_half:w] \ = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size] output[:, :, h_half:h, 0:w_half] \ = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half] output[:, :, h_half:h, w_half:w] \ = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size] return output def forward_x8(self, x, forward_function): def _transform(v, op): if self.precision != 'single': v = v.float() v2np = v.data.cpu().numpy() if op == 'v': tfnp = v2np[:, :, :, ::-1].copy() elif op == 'h': tfnp = v2np[:, :, ::-1, :].copy() elif op == 't': tfnp = v2np.transpose((0, 1, 3, 2)).copy() ret = torch.Tensor(tfnp).to(self.device) 
if self.precision == 'half': ret = ret.half() return ret lr_list = [x] for tf in 'v', 'h', 't': lr_list.extend([_transform(t, tf) for t in lr_list]) sr_list = [forward_function(aug) for aug in lr_list] for i in range(len(sr_list)): if i > 3: sr_list[i] = _transform(sr_list[i], 't') if i % 4 > 1: sr_list[i] = _transform(sr_list[i], 'h') if (i % 4) % 2 == 1: sr_list[i] = _transform(sr_list[i], 'v') output_cat = torch.cat(sr_list, dim=0) output = output_cat.mean(dim=0, keepdim=True) return output
6,199
31.460733
90
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/panet.py
from model import common from model import attention import torch.nn as nn def make_model(args, parent=False): return PANET(args) class PANET(nn.Module): def __init__(self, args, conv=common.default_conv): super(PANET, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 scale = args.scale[0] rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std) msa = attention.PyramidAttention() # define head module m_head = [conv(args.n_colors, n_feats, kernel_size)] # define body module m_body = [ common.ResBlock( conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale ) for _ in range(n_resblocks//2) ] m_body.append(msa) for i in range(n_resblocks//2): m_body.append(common.ResBlock(conv,n_feats,kernel_size,nn.PReLU(),res_scale=args.res_scale)) m_body.append(conv(n_feats, n_feats, kernel_size)) # define tail module #m_tail = [ # common.Upsampler(conv, scale, n_feats, act=False), # conv(n_feats, args.n_colors, kernel_size) #] m_tail = [ conv(n_feats, args.n_colors, kernel_size) ] self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) self.head = nn.Sequential(*m_head) self.body = nn.Sequential(*m_body) self.tail = nn.Sequential(*m_tail) def forward(self, x): #x = self.sub_mean(x) x = self.head(x) res = self.body(x) res += x x = self.tail(res) #x = self.add_mean(x) return x def load_state_dict(self, state_dict, strict=True): own_state = self.state_dict() for name, param in state_dict.items(): if name in own_state: if isinstance(param, nn.Parameter): param = param.data try: own_state[name].copy_(param) except Exception: if name.find('tail') == -1: raise RuntimeError('While copying the parameter named {}, ' 'whose dimensions in the model are {} and ' 'whose dimensions in the checkpoint are {}.' .format(name, own_state[name].size(), param.size())) elif strict: if name.find('tail') == -1: raise KeyError('unexpected key "{}" in state_dict' .format(name))
2,779
32.493976
104
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/attention.py
import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms from torchvision import utils as vutils from model import common from utils.tools import extract_image_patches,\ reduce_mean, reduce_sum, same_padding class PyramidAttention(nn.Module): def __init__(self, level=5, res_scale=1, channel=64, reduction=2, ksize=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv): super(PyramidAttention, self).__init__() self.ksize = ksize self.stride = stride self.res_scale = res_scale self.softmax_scale = softmax_scale self.scale = [1-i/10 for i in range(level)] self.average = average escape_NaN = torch.FloatTensor([1e-4]) self.register_buffer('escape_NaN', escape_NaN) self.conv_match_L_base = common.BasicBlock(conv,channel,channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_match = common.BasicBlock(conv,channel, channel//reduction, 1, bn=False, act=nn.PReLU()) self.conv_assembly = common.BasicBlock(conv,channel, channel,1,bn=False, act=nn.PReLU()) def forward(self, input): res = input #theta match_base = self.conv_match_L_base(input) shape_base = list(res.size()) input_groups = torch.split(match_base,1,dim=0) # patch size for matching kernel = self.ksize # raw_w is for reconstruction raw_w = [] # w is for matching w = [] #build feature pyramid for i in range(len(self.scale)): ref = input if self.scale[i]!=1: ref = F.interpolate(input, scale_factor=self.scale[i], mode='bicubic') #feature transformation function f base = self.conv_assembly(ref) shape_input = base.shape #sampling raw_w_i = extract_image_patches(base, ksizes=[kernel, kernel], strides=[self.stride,self.stride], rates=[1, 1], padding='same') # [N, C*k*k, L] raw_w_i = raw_w_i.view(shape_input[0], shape_input[1], kernel, kernel, -1) raw_w_i = raw_w_i.permute(0, 4, 1, 2, 3) # raw_shape: [N, L, C, k, k] raw_w_i_groups = torch.split(raw_w_i, 1, dim=0) raw_w.append(raw_w_i_groups) #feature transformation function g ref_i = self.conv_match(ref) 
shape_ref = ref_i.shape #sampling w_i = extract_image_patches(ref_i, ksizes=[self.ksize, self.ksize], strides=[self.stride, self.stride], rates=[1, 1], padding='same') w_i = w_i.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1) w_i = w_i.permute(0, 4, 1, 2, 3) # w shape: [N, L, C, k, k] w_i_groups = torch.split(w_i, 1, dim=0) w.append(w_i_groups) y = [] for idx, xi in enumerate(input_groups): #group in a filter wi = torch.cat([w[i][idx][0] for i in range(len(self.scale))],dim=0) # [L, C, k, k] #normalize max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2), axis=[1, 2, 3], keepdim=True)), self.escape_NaN) wi_normed = wi/ max_wi #matching xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1]) # xi: 1*c*H*W yi = F.conv2d(xi, wi_normed, stride=1) # [1, L, H, W] L = shape_ref[2]*shape_ref[3] yi = yi.view(1,wi.shape[0], shape_base[2], shape_base[3]) # (B=1, C=32*32, H=32, W=32) # softmax matching score yi = F.softmax(yi*self.softmax_scale, dim=1) if self.average == False: yi = (yi == yi.max(dim=1,keepdim=True)[0]).float() # deconv for patch pasting raw_wi = torch.cat([raw_w[i][idx][0] for i in range(len(self.scale))],dim=0) yi = F.conv_transpose2d(yi, raw_wi, stride=self.stride,padding=1)/4. y.append(yi) y = torch.cat(y, dim=0)+res*self.res_scale # back to the mini-batch return y
4,427
46.106383
147
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/vdsr.py
from model import common import torch.nn as nn import torch.nn.init as init url = { 'r20f64': '' } def make_model(args, parent=False): return VDSR(args) class VDSR(nn.Module): def __init__(self, args, conv=common.default_conv): super(VDSR, self).__init__() n_resblocks = args.n_resblocks n_feats = args.n_feats kernel_size = 3 self.url = url['r{}f{}'.format(n_resblocks, n_feats)] self.sub_mean = common.MeanShift(args.rgb_range) self.add_mean = common.MeanShift(args.rgb_range, sign=1) def basic_block(in_channels, out_channels, act): return common.BasicBlock( conv, in_channels, out_channels, kernel_size, bias=True, bn=False, act=act ) # define body module m_body = [] m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True))) for _ in range(n_resblocks - 2): m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True))) m_body.append(basic_block(n_feats, args.n_colors, None)) self.body = nn.Sequential(*m_body) def forward(self, x): x = self.sub_mean(x) res = self.body(x) res += x x = self.add_mean(res) return x
1,275
26.148936
73
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/utils/tools.py
import os import torch import numpy as np from PIL import Image import torch.nn.functional as F def normalize(x): return x.mul_(2).add_(-1) def same_padding(images, ksizes, strides, rates): assert len(images.size()) == 4 batch_size, channel, rows, cols = images.size() out_rows = (rows + strides[0] - 1) // strides[0] out_cols = (cols + strides[1] - 1) // strides[1] effective_k_row = (ksizes[0] - 1) * rates[0] + 1 effective_k_col = (ksizes[1] - 1) * rates[1] + 1 padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows) padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols) # Pad the input padding_top = int(padding_rows / 2.) padding_left = int(padding_cols / 2.) padding_bottom = padding_rows - padding_top padding_right = padding_cols - padding_left paddings = (padding_left, padding_right, padding_top, padding_bottom) images = torch.nn.ZeroPad2d(paddings)(images) return images def extract_image_patches(images, ksizes, strides, rates, padding='same'): """ Extract patches from images and put them in the C output dimension. :param padding: :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape :param ksizes: [ksize_rows, ksize_cols]. 
The size of the sliding window for each dimension of images :param strides: [stride_rows, stride_cols] :param rates: [dilation_rows, dilation_cols] :return: A Tensor """ assert len(images.size()) == 4 assert padding in ['same', 'valid'] batch_size, channel, height, width = images.size() if padding == 'same': images = same_padding(images, ksizes, strides, rates) elif padding == 'valid': pass else: raise NotImplementedError('Unsupported padding type: {}.\ Only "same" or "valid" are supported.'.format(padding)) unfold = torch.nn.Unfold(kernel_size=ksizes, dilation=rates, padding=0, stride=strides) patches = unfold(images) return patches # [N, C*k*k, L], L is the total number of such blocks def reduce_mean(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.mean(x, dim=i, keepdim=keepdim) return x def reduce_std(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.std(x, dim=i, keepdim=keepdim) return x def reduce_sum(x, axis=None, keepdim=False): if not axis: axis = range(len(x.shape)) for i in sorted(axis, reverse=True): x = torch.sum(x, dim=i, keepdim=keepdim) return x
2,777
32.878049
79
py
Pyramid-Attention-Networks
Pyramid-Attention-Networks-master/CAR/code/model/utils/__init__.py
0
0
0
py
time-series-forecasting-release
time-series-forecasting-release/generic_model_trainer.py
import numpy as np import tensorflow as tf import argparse from utility_scripts.persist_optimized_config_results import persist_results from generic_model_tester import testing from utility_scripts.hyperparameter_scripts.hyperparameter_config_reader import read_initial_hyperparameter_values # import the config space and the different types of parameters from smac.configspace import ConfigurationSpace from ConfigSpace.hyperparameters import UniformFloatHyperparameter, UniformIntegerHyperparameter # import SMAC utilities from smac.scenario.scenario import Scenario from smac.facade.smac_facade import SMAC ## import the different model architectures # stacking model from rnn_architectures.stacking_model.stacking_model_trainer import \ StackingModelTrainer as StackingModelTrainer # seq2seq model with decoder from rnn_architectures.seq2seq_model.with_decoder.non_moving_window.unaccumulated_error.seq2seq_model_trainer import \ Seq2SeqModelTrainer as Seq2SeqModelTrainerWithNonMovingWindowUnaccumulatedError # seq2seq model with dense layer from rnn_architectures.seq2seq_model.with_dense_layer.non_moving_window.unaccumulated_error.seq2seq_model_trainer import \ Seq2SeqModelTrainerWithDenseLayer as Seq2SeqModelTrainerWithDenseLayerNonMovingWindowUnaccumulatedError from rnn_architectures.seq2seq_model.with_dense_layer.moving_window.unaccumulated_error.seq2seq_model_trainer import \ Seq2SeqModelTrainerWithDenseLayer as Seq2SeqModelTrainerWithDenseLayerMovingWindow # import the cocob optimizer from external_packages import cocob_optimizer from configs.global_configs import hyperparameter_tuning_configs from configs.global_configs import model_training_configs import csv LSTM_USE_PEEPHOLES = True BIAS = False optimized_config_directory = 'results/optimized_configurations/' learning_rate = 0.0 # function to create the optimizer def adagrad_optimizer_fn(total_loss): return tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(total_loss) def adam_optimizer_fn(total_loss): 
return tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss) def cocob_optimizer_fn(total_loss): return cocob_optimizer.COCOB().minimize(loss=total_loss) # Training the time series def train_model_smac(configs): error, _ = train_model(configs) return error # final execution with the optimized config def train_model(configs): if "rate_of_learning" in configs.keys(): rate_of_learning = configs["rate_of_learning"] global learning_rate learning_rate = rate_of_learning cell_dimension = configs["cell_dimension"] num_hidden_layers = configs["num_hidden_layers"] minibatch_size = configs["minibatch_size"] max_epoch_size = configs["max_epoch_size"] max_num_epochs = configs["max_num_epochs"] l2_regularization = configs["l2_regularization"] gaussian_noise_stdev = configs["gaussian_noise_stdev"] random_normal_initializer_stdev = configs["random_normal_initializer_stdev"] print(configs) # select the appropriate type of optimizer error, error_list = model_trainer.train_model(num_hidden_layers=num_hidden_layers, cell_dimension=cell_dimension, minibatch_size=minibatch_size, max_epoch_size=max_epoch_size, max_num_epochs=max_num_epochs, l2_regularization=l2_regularization, gaussian_noise_stdev=gaussian_noise_stdev, random_normal_initializer_stdev=random_normal_initializer_stdev, optimizer_fn=optimizer_fn) print(model_identifier) return error, error_list def smac(): # Build Configuration Space which defines all parameters and their ranges configuration_space = ConfigurationSpace() rate_of_learning = UniformFloatHyperparameter("rate_of_learning", hyperparameter_values_dic['rate_of_learning'][0], hyperparameter_values_dic['rate_of_learning'][1], default_value=hyperparameter_values_dic['rate_of_learning'][0]) cell_dimension = UniformIntegerHyperparameter("cell_dimension", hyperparameter_values_dic['cell_dimension'][0], hyperparameter_values_dic['cell_dimension'][1], default_value=hyperparameter_values_dic['cell_dimension'][ 0]) no_hidden_layers = 
UniformIntegerHyperparameter("num_hidden_layers", hyperparameter_values_dic['num_hidden_layers'][0], hyperparameter_values_dic['num_hidden_layers'][1], default_value=hyperparameter_values_dic['num_hidden_layers'][0]) minibatch_size = UniformIntegerHyperparameter("minibatch_size", hyperparameter_values_dic['minibatch_size'][0], hyperparameter_values_dic['minibatch_size'][1], default_value=hyperparameter_values_dic['minibatch_size'][0]) max_epoch_size = UniformIntegerHyperparameter("max_epoch_size", hyperparameter_values_dic['max_epoch_size'][0], hyperparameter_values_dic['max_epoch_size'][1], default_value=hyperparameter_values_dic['max_epoch_size'][0]) max_num_of_epochs = UniformIntegerHyperparameter("max_num_epochs", hyperparameter_values_dic['max_num_epochs'][0], hyperparameter_values_dic['max_num_epochs'][1], default_value=hyperparameter_values_dic['max_num_epochs'][0]) l2_regularization = UniformFloatHyperparameter("l2_regularization", hyperparameter_values_dic['l2_regularization'][0], hyperparameter_values_dic['l2_regularization'][1], default_value=hyperparameter_values_dic['l2_regularization'][0]) gaussian_noise_stdev = UniformFloatHyperparameter("gaussian_noise_stdev", hyperparameter_values_dic['gaussian_noise_stdev'][0], hyperparameter_values_dic['gaussian_noise_stdev'][1], default_value=hyperparameter_values_dic['gaussian_noise_stdev'][ 0]) random_normal_initializer_stdev = UniformFloatHyperparameter("random_normal_initializer_stdev", hyperparameter_values_dic[ 'random_normal_initializer_stdev'][0], hyperparameter_values_dic[ 'random_normal_initializer_stdev'][1], default_value=hyperparameter_values_dic[ 'random_normal_initializer_stdev'][ 0]) # add the hyperparameter for learning rate only if the optimization is not cocob if optimizer == "cocob": configuration_space.add_hyperparameters( [cell_dimension, no_hidden_layers, minibatch_size, max_epoch_size, max_num_of_epochs, l2_regularization, gaussian_noise_stdev, random_normal_initializer_stdev]) else: 
configuration_space.add_hyperparameters( [rate_of_learning, cell_dimension, minibatch_size, max_epoch_size, max_num_of_epochs, no_hidden_layers, l2_regularization, gaussian_noise_stdev, random_normal_initializer_stdev]) # creating the scenario object scenario = Scenario({ "run_obj": "quality", "runcount-limit": hyperparameter_tuning_configs.SMAC_RUNCOUNT_LIMIT, "cs": configuration_space, "deterministic": "true", "abort_on_first_run_crash": "false" }) # optimize using an SMAC object smac = SMAC(scenario=scenario, rng=np.random.RandomState(seed), tae_runner=train_model_smac) incumbent = smac.optimize() return incumbent.get_dictionary() if __name__ == '__main__': argument_parser = argparse.ArgumentParser("Train different forecasting models") argument_parser.add_argument('--dataset_name', required=True, help='Unique string for the name of the dataset') argument_parser.add_argument('--contain_zero_values', required=True, help='Whether the dataset contains zero values(0/1)') argument_parser.add_argument('--address_near_zero_instability', required=False, help='Whether to use a custom SMAPE function to address near zero instability(0/1). Default is 0') argument_parser.add_argument('--integer_conversion', required=False, help='Whether to convert the final forecasts to integers(0/1). 
Default is 0') argument_parser.add_argument('--initial_hyperparameter_values_file', required=True, help='The file for the initial hyperparameter configurations') argument_parser.add_argument('--binary_train_file_train_mode', required=True, help='The tfrecords file for train dataset in the training mode') argument_parser.add_argument('--binary_valid_file_train_mode', required=True, help='The tfrecords file for validation dataset in the training mode') argument_parser.add_argument('--binary_train_file_test_mode', required=True, help='The tfrecords file for train dataset in the testing mode') argument_parser.add_argument('--binary_test_file_test_mode', required=True, help='The tfrecords file for test dataset in the testing mode') argument_parser.add_argument('--txt_test_file', required=True, help='The txt file for test dataset') argument_parser.add_argument('--actual_results_file', required=True, help='The txt file of the actual results') argument_parser.add_argument('--original_data_file', required=True, help='The txt file of the original dataset') argument_parser.add_argument('--cell_type', required=False, help='The cell type of the RNN(LSTM/GRU/RNN). Default is LSTM') argument_parser.add_argument('--input_size', required=False, help='The input size of the moving window. 
Default is 0') argument_parser.add_argument('--seasonality_period', required=True, help='The seasonality period of the time series') argument_parser.add_argument('--forecast_horizon', required=True, help='The forecast horizon of the dataset') argument_parser.add_argument('--optimizer', required=True, help='The type of the optimizer(cocob/adam/adagrad...)') argument_parser.add_argument('--hyperparameter_tuning', required=True, help='The method for hyperparameter tuning(bayesian/smac)') argument_parser.add_argument('--model_type', required=True, help='The type of the model(stacking/seq2seq/seq2seqwithdenselayer)') argument_parser.add_argument('--input_format', required=True, help='Input format(moving_window/non_moving_window)') argument_parser.add_argument('--without_stl_decomposition', required=False, help='Whether not to use stl decomposition(0/1). Default is 0') argument_parser.add_argument('--with_truncated_backpropagation', required=False, help='Whether not to use truncated backpropagation(0/1). Default is 0') argument_parser.add_argument('--with_accumulated_error', required=False, help='Whether to accumulate errors over the moving windows. 
Default is 0') argument_parser.add_argument('--seed', required=True, help='Integer seed to use as the random seed') # parse the user arguments args = argument_parser.parse_args() dataset_name = args.dataset_name initial_hyperparameter_values_file = args.initial_hyperparameter_values_file binary_train_file_path_train_mode = args.binary_train_file_train_mode binary_validation_file_path_train_mode = args.binary_valid_file_train_mode contain_zero_values = int(args.contain_zero_values) if args.input_size: input_size = int(args.input_size) else: input_size = 0 output_size = int(args.forecast_horizon) optimizer = args.optimizer hyperparameter_tuning = args.hyperparameter_tuning model_type = args.model_type input_format = args.input_format seed = int(args.seed) if args.without_stl_decomposition: without_stl_decomposition = bool(int(args.without_stl_decomposition)) else: without_stl_decomposition = False if args.with_truncated_backpropagation: with_truncated_backpropagation = bool(int(args.with_truncated_backpropagation)) else: with_truncated_backpropagation = False if args.cell_type: cell_type = args.cell_type else: cell_type = "LSTM" if args.with_accumulated_error: with_accumulated_error = bool(int(args.with_accumulated_error)) else: with_accumulated_error = False if args.address_near_zero_instability: address_near_zero_instability = bool(int(args.address_near_zero_instability)) else: address_near_zero_instability = False if args.integer_conversion: integer_conversion = bool(int(args.integer_conversion)) else: integer_conversion = False if with_truncated_backpropagation: tbptt_identifier = "with_truncated_backpropagation" else: tbptt_identifier = "without_truncated_backpropagation" if without_stl_decomposition: stl_decomposition_identifier = "without_stl_decomposition" else: stl_decomposition_identifier = "with_stl_decomposition" if with_accumulated_error: accumulated_error_identifier = "with_accumulated_error" else: accumulated_error_identifier = 
"without_accumulated_error" model_identifier = dataset_name + "_" + model_type + "_" + cell_type + "cell" + "_" + input_format + "_" + stl_decomposition_identifier + "_" + hyperparameter_tuning + "_" + optimizer + "_" + tbptt_identifier + "_" + accumulated_error_identifier + "_" + str( seed) print("Model Training Started for {}".format(model_identifier)) # select the optimizer if optimizer == "cocob": optimizer_fn = cocob_optimizer_fn elif optimizer == "adagrad": optimizer_fn = adagrad_optimizer_fn elif optimizer == "adam": optimizer_fn = adam_optimizer_fn # define the key word arguments for the different model types model_kwargs = { 'use_bias': BIAS, 'use_peepholes': LSTM_USE_PEEPHOLES, 'input_size': input_size, 'output_size': output_size, 'binary_train_file_path': binary_train_file_path_train_mode, 'binary_validation_file_path': binary_validation_file_path_train_mode, 'contain_zero_values': contain_zero_values, 'address_near_zero_instability': address_near_zero_instability, 'integer_conversion': integer_conversion, 'seed': seed, 'cell_type': cell_type, 'without_stl_decomposition': without_stl_decomposition } # select the model type if model_type == "stacking": model_trainer = StackingModelTrainer(**model_kwargs) elif model_type == "seq2seq": model_trainer = Seq2SeqModelTrainerWithNonMovingWindowUnaccumulatedError(**model_kwargs) elif model_type == "seq2seqwithdenselayer": if input_format == "non_moving_window": model_trainer = Seq2SeqModelTrainerWithDenseLayerNonMovingWindowUnaccumulatedError(**model_kwargs) elif input_format == "moving_window": model_trainer = Seq2SeqModelTrainerWithDenseLayerMovingWindow(**model_kwargs) # read the initial hyperparamter configurations from the file hyperparameter_values_dic = read_initial_hyperparameter_values(initial_hyperparameter_values_file) optimized_configuration = smac() # persist the optimized configuration to a file persist_results(optimized_configuration, optimized_config_directory + '/' + model_identifier + '.txt') # 
get the validation errors for the best hyperparameter configs smape_error, smape_error_list = train_model(optimized_configuration) # write the final list of validation errors to a file validation_errors_file = model_training_configs.VALIDATION_ERRORS_DIRECTORY + model_identifier + ".csv" with open(validation_errors_file, "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerow(smape_error_list) print("Optimized configuration: {}".format(optimized_configuration)) print("Optimized Value: {}\n".format(smape_error)) # test the model for i in range(1, 11): args.seed = i testing(args, optimized_configuration)
17,829
51.908012
262
py
time-series-forecasting-release
time-series-forecasting-release/generic_model_tester.py
import csv import tensorflow as tf # import the different model types # stacking model from rnn_architectures.stacking_model.stacking_model_tester import \ StackingModelTester as StackingModelTester # seq2seq model with decoder from rnn_architectures.seq2seq_model.with_decoder.non_moving_window.unaccumulated_error.seq2seq_model_tester import \ Seq2SeqModelTester as Seq2SeqModelTesterWithNonMovingWindowUnaccumulatedError # seq2seq model with dense layer from rnn_architectures.seq2seq_model.with_dense_layer.non_moving_window.unaccumulated_error.seq2seq_model_tester import \ Seq2SeqModelTesterWithDenseLayer as Seq2SeqModelTesterWithDenseLayerNonMovingWindowUnaccumulatedError from rnn_architectures.seq2seq_model.with_dense_layer.moving_window.unaccumulated_error.seq2seq_model_tester import \ Seq2SeqModelTesterWithDenseLayer as Seq2SeqModelTesterWithDenseLayerMovingWindow # import the cocob optimizer from external_packages import cocob_optimizer from utility_scripts.invoke_r_final_evaluation import invoke_r_script from configs.global_configs import model_testing_configs LSTM_USE_PEEPHOLES = True BIAS = False learning_rate = 0.0 # function to create the optimizer def adagrad_optimizer_fn(total_loss): return tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(total_loss) def adam_optimizer_fn(total_loss): return tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss) def cocob_optimizer_fn(total_loss): return cocob_optimizer.COCOB().minimize(loss=total_loss) def testing(args, config_dictionary): # to make the random number choices reproducible global learning_rate dataset_name = args.dataset_name contain_zero_values = int(args.contain_zero_values) binary_train_file_path_test_mode = args.binary_train_file_test_mode binary_test_file_path_test_mode = args.binary_test_file_test_mode txt_test_file_path = args.txt_test_file actual_results_file_path = args.actual_results_file original_data_file_path = args.original_data_file if (args.input_size): 
input_size = int(args.input_size) else: input_size = 0 output_size = int(args.forecast_horizon) seasonality_period = int(args.seasonality_period) optimizer = args.optimizer hyperparameter_tuning = args.hyperparameter_tuning model_type = args.model_type input_format = args.input_format seed = int(args.seed) if args.without_stl_decomposition: without_stl_decomposition = bool(int(args.without_stl_decomposition)) else: without_stl_decomposition = False if args.with_truncated_backpropagation: with_truncated_backpropagation = bool(int(args.with_truncated_backpropagation)) else: with_truncated_backpropagation = False if args.cell_type: cell_type = args.cell_type else: cell_type = "LSTM" if args.with_accumulated_error: with_accumulated_error = bool(int(args.with_accumulated_error)) else: with_accumulated_error = False if args.address_near_zero_instability: address_near_zero_instability = bool(int(args.address_near_zero_instability)) else: address_near_zero_instability = False if args.integer_conversion: integer_conversion = bool(int(args.integer_conversion)) else: integer_conversion = False if not with_truncated_backpropagation: tbptt_identifier = "without_truncated_backpropagation" else: tbptt_identifier = "with_truncated_backpropagation" if not without_stl_decomposition: stl_decomposition_identifier = "with_stl_decomposition" else: stl_decomposition_identifier = "without_stl_decomposition" if with_accumulated_error: accumulated_error_identifier = "with_accumulated_error" else: accumulated_error_identifier = "without_accumulated_error" model_identifier = dataset_name + "_" + model_type + "_" + cell_type + "cell" + "_" + input_format + "_" + stl_decomposition_identifier + "_" + hyperparameter_tuning + "_" + optimizer + "_" + tbptt_identifier + "_" + accumulated_error_identifier + "_" + str( seed) print("Model Testing Started for {}".format(model_identifier)) print(config_dictionary) # select the optimizer if optimizer == "cocob": optimizer_fn = cocob_optimizer_fn elif 
optimizer == "adagrad": optimizer_fn = adagrad_optimizer_fn elif optimizer == "adam": optimizer_fn = adam_optimizer_fn # define the key word arguments for the different model types model_kwargs = { 'use_bias': BIAS, 'use_peepholes': LSTM_USE_PEEPHOLES, 'input_size': input_size, 'output_size': output_size, 'binary_train_file_path': binary_train_file_path_test_mode, 'binary_test_file_path': binary_test_file_path_test_mode, 'seed': seed, 'cell_type': cell_type, 'without_stl_decomposition': without_stl_decomposition } # select the model type if model_type == "stacking": model_tester = StackingModelTester(**model_kwargs) elif model_type == "seq2seq": model_tester = Seq2SeqModelTesterWithNonMovingWindowUnaccumulatedError(**model_kwargs) elif model_type == "seq2seqwithdenselayer": if input_format == "non_moving_window": model_tester = Seq2SeqModelTesterWithDenseLayerNonMovingWindowUnaccumulatedError(**model_kwargs) elif input_format == "moving_window": model_tester = Seq2SeqModelTesterWithDenseLayerMovingWindow(**model_kwargs) if 'rate_of_learning' in config_dictionary: learning_rate = config_dictionary['rate_of_learning'] num_hidden_layers = config_dictionary['num_hidden_layers'] max_num_epochs = config_dictionary['max_num_epochs'] max_epoch_size = config_dictionary['max_epoch_size'] cell_dimension = config_dictionary['cell_dimension'] l2_regularization = config_dictionary['l2_regularization'] minibatch_size = config_dictionary['minibatch_size'] gaussian_noise_stdev = config_dictionary['gaussian_noise_stdev'] random_normal_initializer_stdev = config_dictionary['random_normal_initializer_stdev'] list_of_forecasts = model_tester.test_model(num_hidden_layers=int(round(num_hidden_layers)), cell_dimension=int(round(cell_dimension)), minibatch_size=int(round(minibatch_size)), max_epoch_size=int(round(max_epoch_size)), max_num_epochs=int(round(max_num_epochs)), l2_regularization=l2_regularization, gaussian_noise_stdev=gaussian_noise_stdev, 
random_normal_initializer_stdev=random_normal_initializer_stdev, optimizer_fn=optimizer_fn) # write the forecasting results to a file rnn_forecasts_file_path = model_testing_configs.RNN_FORECASTS_DIRECTORY + model_identifier + '.txt' with open(rnn_forecasts_file_path, "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerows(list_of_forecasts) # invoke the final evaluation R script error_file_name = model_identifier + '.txt' if input_format == "moving_window": invoke_r_script((rnn_forecasts_file_path, error_file_name, txt_test_file_path, actual_results_file_path, original_data_file_path, str(input_size), str(output_size), str(contain_zero_values), str(int(address_near_zero_instability)), str(int(integer_conversion)), str(int(seasonality_period)), str(int(without_stl_decomposition))), True) else: invoke_r_script((rnn_forecasts_file_path, error_file_name, txt_test_file_path, actual_results_file_path, original_data_file_path, str(output_size), str(contain_zero_values), str(int(address_near_zero_instability)), str(int(integer_conversion)), str(int(seasonality_period)), str(int(without_stl_decomposition))), False)
8,308
41.829897
269
py
time-series-forecasting-release
time-series-forecasting-release/external_packages/cocob_optimizer/cocob_optimizer.py
# Copyright 2017 Francesco Orabona. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ''' COntinuos COin Betting (COCOB) optimizer ''' from tensorflow.python.framework import ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.framework import constant_op from tensorflow.python.training.optimizer import Optimizer import tensorflow as tf class COCOB(Optimizer): def __init__(self, alpha=100, use_locking=False, name='COCOB'): ''' constructs a new COCOB optimizer ''' super(COCOB, self).__init__(use_locking, name) self._alpha = alpha def _create_slots(self, var_list): for v in var_list: with ops.colocate_with(v): gradients_sum = constant_op.constant(0, shape=v.get_shape(), dtype=v.dtype.base_dtype) grad_norm_sum = constant_op.constant(0, shape=v.get_shape(), dtype=v.dtype.base_dtype) L = constant_op.constant(1e-8, shape=v.get_shape(), dtype=v.dtype.base_dtype) tilde_w = constant_op.constant(0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype) reward = constant_op.constant(0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype) self._get_or_make_slot(v, L, "L", self._name) self._get_or_make_slot(v, grad_norm_sum, "grad_norm_sum", self._name) self._get_or_make_slot(v, gradients_sum, "gradients_sum", self._name) self._get_or_make_slot(v, tilde_w, "tilde_w", self._name) self._get_or_make_slot(v, reward, "reward", self._name) 
def _apply_dense(self, grad, var): gradients_sum = self.get_slot(var, "gradients_sum") grad_norm_sum = self.get_slot(var, "grad_norm_sum") tilde_w = self.get_slot(var, "tilde_w") L = self.get_slot(var, "L") reward = self.get_slot(var, "reward") L_update = tf.maximum(L,tf.abs(grad)) gradients_sum_update = gradients_sum + grad grad_norm_sum_update = grad_norm_sum + tf.abs(grad) reward_update = tf.maximum(reward-grad*tilde_w,0) new_w = -gradients_sum_update/(L_update*(tf.maximum(grad_norm_sum_update+L_update,self._alpha*L_update)))*(reward_update+L_update) var_update = var-tilde_w+new_w tilde_w_update=new_w gradients_sum_update_op = state_ops.assign(gradients_sum, gradients_sum_update) grad_norm_sum_update_op = state_ops.assign(grad_norm_sum, grad_norm_sum_update) var_update_op = state_ops.assign(var, var_update) tilde_w_update_op = state_ops.assign(tilde_w, tilde_w_update) L_update_op = state_ops.assign(L, L_update) reward_update_op = state_ops.assign(reward, reward_update) return control_flow_ops.group(*[gradients_sum_update_op, var_update_op, grad_norm_sum_update_op, tilde_w_update_op, reward_update_op, L_update_op]) def _apply_sparse(self, grad, var): return self._apply_dense(grad, var) def _resource_apply_dense(self, grad, handle): return self._apply_dense(grad, handle)
4,063
43.659341
138
py
time-series-forecasting-release
time-series-forecasting-release/external_packages/cocob_optimizer/__init__.py
from cocob_optimizer import *
29
29
29
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/moving_window/create_o12_tfrecords.py
from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../datasets/binary_data/CIF_2016/moving_window/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( input_size = 15, output_size = 12, train_file_path = '../../../datasets/text_data/CIF_2016/moving_window/stl_12i15.txt', validate_file_path = '../../../datasets/text_data/CIF_2016/moving_window/stl_12i15v.txt', test_file_path = '../../../datasets/text_data/CIF_2016/moving_window/cif12test.txt', binary_train_file_path = output_path + 'stl_12i15.tfrecords', binary_validation_file_path = output_path + 'stl_12i15v.tfrecords', binary_test_file_path = output_path + 'cif12test.tfrecords' ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,052
44.782609
97
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/moving_window/create_o6_tfrecords.py
from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../datasets/binary_data/CIF_2016/moving_window/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( input_size = 7, output_size = 6, train_file_path = '../../../datasets/text_data/CIF_2016/moving_window/stl_6i7.txt', validate_file_path = '../../../datasets/text_data/CIF_2016/moving_window/stl_6i7v.txt', test_file_path = '../../../datasets/text_data/CIF_2016/moving_window/cif6test.txt', binary_train_file_path = output_path + 'stl_6i7.tfrecords', binary_validation_file_path = output_path + 'stl_6i7v.tfrecords', binary_test_file_path = output_path + 'cif6test.tfrecords' ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,040
44.26087
95
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/moving_window/without_stl_decomposition/create_o12_tfrecords.py
from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../../datasets/binary_data/CIF_2016/moving_window/without_stl_decomposition/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( input_size = 15, output_size = 12, train_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif_12i15.txt', validate_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif_12i15v.txt', test_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif12test.txt', binary_train_file_path = output_path + 'cif_12i15.tfrecords', binary_validation_file_path = output_path + 'cif_12i15v.tfrecords', binary_test_file_path = output_path + 'cif12test.tfrecords' ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,172
50
126
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/moving_window/without_stl_decomposition/create_o6_tfrecords.py
from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../../datasets/binary_data/CIF_2016/moving_window/without_stl_decomposition/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( input_size = 7, output_size = 6, train_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif_6i7.txt', validate_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif_6i7v.txt', test_file_path = '../../../../datasets/text_data/CIF_2016/moving_window/without_stl_decomposition/cif6test.txt', binary_train_file_path = output_path + 'cif_6i7.tfrecords', binary_validation_file_path = output_path + 'cif_6i7v.tfrecords', binary_test_file_path = output_path + 'cif6test.tfrecords' ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,156
49.304348
124
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/non_moving_window/create_o12_tfrecords.py
from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../datasets/binary_data/CIF_2016/non_moving_window/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( output_size = 12, train_file_path='../../../datasets/text_data/CIF_2016/non_moving_window/cif_stl_12.txt', validate_file_path='../../../datasets/text_data/CIF_2016/non_moving_window/cif_stl_12v.txt', test_file_path='../../../datasets/text_data/CIF_2016/non_moving_window/cif_test_12.txt', binary_train_file_path=output_path + 'cif_stl_12.tfrecords', binary_validation_file_path=output_path + 'cif_stl_12v.tfrecords', binary_test_file_path=output_path + 'cif_test_12.tfrecords', without_stl_decomposition=False ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,084
46.173913
100
py
time-series-forecasting-release
time-series-forecasting-release/preprocess_scripts/CIF_2016/non_moving_window/create_o6_tfrecords.py
from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter import os output_path = "../../../datasets/binary_data/CIF_2016/non_moving_window/" if not os.path.exists(output_path): os.makedirs(output_path) if __name__ == '__main__': tfrecord_writer = TFRecordWriter( output_size = 6, train_file_path = '../../../datasets/text_data/CIF_2016/non_moving_window/cif_stl_6.txt', validate_file_path = '../../../datasets/text_data/CIF_2016/non_moving_window/cif_stl_6v.txt', test_file_path = '../../../datasets/text_data/CIF_2016/non_moving_window/cif_test_6.txt', binary_train_file_path = output_path + 'cif_stl_6.tfrecords', binary_validation_file_path = output_path + 'cif_stl_6v.tfrecords', binary_test_file_path = output_path + 'cif_test_6.tfrecords', ) tfrecord_writer.read_text_data() tfrecord_writer.write_train_data_to_tfrecord_file() tfrecord_writer.write_validation_data_to_tfrecord_file() tfrecord_writer.write_test_data_to_tfrecord_file()
1,049
46.727273
101
py