repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/vision_transformer.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GPSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
locality_strength=1., use_local_init=True, grid_size=None):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.locality_strength = locality_strength
self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
self.apply(self._init_weights)
if use_local_init:
self.local_init(locality_strength=locality_strength)
self.current_grid_size = grid_size
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_attention(self, x):
B, N, C = x.shape
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1,-1,1,1)
attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn = attn / attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map = False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:,:,-1]**.5
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self, locality_strength=1.):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 #max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads**.5)
center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1+kernel_size*h2
self.pos_proj.weight.data[position,2] = -1
self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
self.pos_proj.weight.data *= locality_strength
def get_rel_indices(self, ):
H, W = self.current_grid_size
N = H*W
rel_indices = torch.zeros(1, N, N, 3)
indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
indx = indx.repeat(H, H)
indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
indd = indx**2 + indy**2
rel_indices[:,:,:,2] = indd.unsqueeze(0)
rel_indices[:,:,:,1] = indy.unsqueeze(0)
rel_indices[:,:,:,0] = indx.unsqueeze(0)
device = self.v.weight.device
self.rel_indices = rel_indices.to(device)
def forward(self, x):
B, N, C = x.shape
if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
self.get_rel_indices()
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MHSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.apply(self._init_weights)
self.current_grid_size = grid_size
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_attention_map(self, x, return_map = False):
self.get_rel_indices()
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:,:,-1]**.5
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def get_rel_indices(self, ):
H, W = self.current_grid_size
N = H*W
rel_indices = torch.zeros(1, N, N, 3)
indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
indx = indx.repeat(H, H)
indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
indd = indx**2 + indy**2
rel_indices[:,:,:,2] = indd.unsqueeze(0)
rel_indices[:,:,:,1] = indy.unsqueeze(0)
rel_indices[:,:,:,0] = indx.unsqueeze(0)
device = self.qkv.weight.device
self.rel_indices = rel_indices.to(device)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
else:
self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, grid_size):
self.attn.current_grid_size = grid_size
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding, from timm
"""
def __init__(self, patch_size, in_chans, embed_dim):
super().__init__()
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.apply(self._init_weights)
def forward(self, x):
x = self.proj(x)
return x
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
""" Vision Transformer
"""
def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
super().__init__()
self.depth = depth
embed_dim *= num_heads
self.num_features = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if isinstance(avrg_img_size, int):
img_size = to_2tuple(avrg_img_size)
if isinstance(patch_size, int):
self.patch_size = to_2tuple(patch_size)
else:
self.patch_size = patch_size
self.in_chans = in_chans
self.patch_embed = PatchEmbed(
patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(
torch.zeros(1, embed_dim,
img_size[0] // self.patch_size[0],
img_size[1] // self.patch_size[1])
)
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=True,
locality_strength=locality_strength)
if i>=gpsa_interval[0]-1 and i<gpsa_interval[1] else
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=False,)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def seq2img(self, x, img_size):
"""
Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
output dims: [batch_size, channels, H, W]
"""
x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
x = x.chunk(x.shape[1], dim=1)
x = torch.cat(x, dim=4).permute(0,1,2,4,3)
x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
x = torch.cat(x, dim=4).permute(0,1,2,4,3).squeeze(1)
return x
self.head.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self,):
return {'pos_embed'}
def get_head(self,):
return self.head
def reset_head(self,):
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def forward_features(self, x, k=None):
x = self.patch_embed(x)
_, _, H, W = x.shape
if self.use_pos_embed:
pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners = False)
x = x + pos_embed
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
for u, blk in enumerate(self.blocks):
x = blk(x, (H, W))
if k is not None and u == k:
self.attention_map = blk.attn.get_attention_map(x, return_map = True)
x = self.norm(x)
return x
def forward(self, x, k=None):
_, _, H, W = x.shape
x = self.forward_features(x, k)
x = self.head(x)
x = self.seq2img(x, (H, W))
return x | 15,082 | 39.007958 | 186 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
from .unet import Unet
from .vision_transformer import VisionTransformer
class ReconNet(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
def pad(self, x):
_, _, h, w = x.shape
hp, wp = self.net.patch_size
f1 = ( (wp - w % wp) % wp ) / 2
f2 = ( (hp - h % hp) % hp ) / 2
wpad = [floor(f1), ceil(f1)]
hpad = [floor(f2), ceil(f2)]
x = F.pad(x, wpad+hpad)
return x, wpad, hpad
def unpad(self, x, wpad, hpad):
return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
def norm(self, x):
mean = x.view(x.shape[0], 1, 1, -1).mean(-1, keepdim=True)
std = x.view(x.shape[0], 1, 1, -1,).std(-1, keepdim=True)
x = (x-mean)/std
return x, mean, std
def unnorm(self, x, mean, std):
return x * std + mean
def vit_forward(self, x, k=None):
x, wpad, hpad = self.pad(x)
x, mean, std = self.norm(x)
x = self.net(x, k)
x = self.unnorm(x, mean, std)
x = self.unpad(x, wpad, hpad)
return x
def unet_forward(self, x):
x, mean, std = self.norm(x)
x = self.net(x)
x = self.unnorm(x, mean, std)
return x
def mixer_forward(self, x):
x, mean, std = self.norm(x)
x = self.net(x)
x = self.unnorm(x, mean, std)
return x
def forward(self, x, k=None):
if isinstance(self.net, Unet):
return self.unet_forward(x)
elif isinstance(self.net, VisionTransformer):
return self.vit_forward(x, k)
else:
return self.mixer_forward(x)
| 1,932 | 25.847222 | 90 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
"""
PyTorch implementation of a U-Net model.
O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical
image computing and computer-assisted intervention, pages 234–241.
Springer, 2015.
"""
def __init__(
self,
in_chans: int,
out_chans: int,
chans: int = 32,
num_pool_layers: int = 4,
drop_prob: float = 0.0,
):
"""
Args:
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
chans: Number of output channels of the first convolution layer.
num_pool_layers: Number of down-sampling and up-sampling layers.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for _ in range(num_pool_layers - 1):
self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
ch *= 2
self.conv = ConvBlock(ch, ch * 2, drop_prob)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
ch //= 2
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
)
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
stack = []
output = image
# apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# reflect pad on the right/botton if needed to handle odd input dimensions
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # padding bottom
if torch.sum(torch.tensor(padding)) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
class ConvBlock(nn.Module):
"""
A Convolutional Block that consists of two convolution layers each followed by
instance normalization, LeakyReLU activation and dropout.
"""
def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
return self.layers(image)
class TransposeConvBlock(nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose
layers followed by instance normalization and LeakyReLU activation.
"""
def __init__(self, in_chans: int, out_chans: int):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = nn.Sequential(
nn.ConvTranspose2d(
in_chans, out_chans, kernel_size=2, stride=2, bias=False
),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H*2, W*2)`.
"""
return self.layers(image)
| 5,979 | 31.677596 | 88 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/__init__.py | from .recon_net import ReconNet
from .vision_transformer import VisionTransformer
from .img2img_mixer import Img2Img_Mixer
from .unet import Unet
| 146 | 28.4 | 49 | py |
imaging_MLPs | imaging_MLPs-master/untrained/networks/original_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
def __init__(
self,
patch_size: int,
hidden_dim: int,
channels: int
):
super().__init__()
self.proj = nn.Sequential(
nn.Conv2d(
in_channels=channels,
out_channels=hidden_dim,
kernel_size=patch_size,
stride=patch_size
),
Rearrange("b c h w -> b (h w) c")
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.proj(x)
class PatchEmbeddings_transpose(nn.Module):
def __init__(
self,
patch_size: int,
hidden_dim: int,
channels: int,
d: int
):
super().__init__()
self.proj_transpose = nn.Sequential(
Rearrange("b (h w) c -> b c h w", h=d),
nn.ConvTranspose2d(
in_channels=hidden_dim,
out_channels=channels,
kernel_size=patch_size,
stride=patch_size
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.proj_transpose(x)
class MLPBlock(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int):
super().__init__()
self.model = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
class MixerBlock(nn.Module):
def __init__(
self,
num_patches: int,
num_channels: int,
tokens_hidden_dim: int,
channels_hidden_dim: int
):
super().__init__()
self.token_mixing = nn.Sequential(
nn.LayerNorm(num_channels),
Rearrange("b p c -> b c p"),
MLPBlock(num_patches, tokens_hidden_dim),
Rearrange("b c p -> b p c")
)
self.channel_mixing = nn.Sequential(
nn.LayerNorm(num_channels),
MLPBlock(num_channels, channels_hidden_dim)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.token_mixing(x)
x = x + self.channel_mixing(x)
return x
class Original_Mixer(nn.Module):
def __init__(
self,
image_size: int = 256,
channels: int = 1,
patch_size: int = 4,
num_layers: int = 8,
hidden_dim: int = 128,
tokens_hidden_dim: int = 96,
channels_hidden_dim: int = 256
):
super().__init__()
num_patches = (image_size // patch_size) ** 2
d=(image_size-patch_size)//patch_size + 1
self.embed = PatchEmbeddings(patch_size, hidden_dim, channels)
layers = [
MixerBlock(
num_patches=num_patches,
num_channels=hidden_dim,
tokens_hidden_dim=tokens_hidden_dim,
channels_hidden_dim=channels_hidden_dim
)
for _ in range(num_layers)
]
self.layers = nn.Sequential(*layers)
self.norm = nn.LayerNorm(hidden_dim)
self.embed_transpose = PatchEmbeddings_transpose(patch_size, hidden_dim, channels, d)
def forward(self, x: torch.Tensor) -> torch.Tensor:
b, c, h, w = x.shape
x = self.embed(x)
x = self.layers(x)
x = self.norm(x)
x = self.embed_transpose(x)
return x | 3,674 | 26.840909 | 93 | py |
imaging_MLPs | imaging_MLPs-master/untrained/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
def __init__(
self,
patch_size: int,
embed_dim: int,
channels: int
):
super().__init__()
self.proj = nn.Sequential(
nn.Conv2d(
in_channels=channels,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size
),
Rearrange("b c h w -> b h w c")
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.proj(x)
class PatchExpansion(nn.Module):
def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
super().__init__()
self.dim_scale = dim_scale
self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
self.output_dim = channel_dim
self.norm = norm_layer(channel_dim)
self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
def forward(self, x):
x = self.expand(x)
B, H, W, C = x.shape
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
x = x.view(B,-1,self.output_dim)
x= self.norm(x)
x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
x = x.permute(0,3,1,2)
x = self.output(x)
return x
class MLPBlock(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int):
super().__init__()
self.model = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, input_dim)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
class Mixer(nn.Module):
def __init__(
self,
num_patches: int,
num_channels: int,
f_hidden: int
):
super().__init__()
self.token_mixing = nn.Sequential(
nn.LayerNorm(num_channels),
Rearrange("b h w c -> b c w h"),
MLPBlock(num_patches, num_patches*f_hidden),
Rearrange("b c w h -> b c h w"),
MLPBlock(num_patches, num_patches*f_hidden),
Rearrange("b c h w -> b h w c"),
)
self.channel_mixing = nn.Sequential(
nn.LayerNorm(num_channels),
MLPBlock(num_channels, num_channels*f_hidden)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.token_mixing(x)
x = x + self.channel_mixing(x)
return x
class Img2Img_Mixer(nn.Module):
def __init__(
self,
img_size: int = 256,
img_channels: int = 1,
patch_size: int = 4,
embed_dim: int = 128,
num_layers: int = 16,
f_hidden: int = 8,
):
super().__init__()
self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
for _ in range(num_layers)]
self.mixer_layers = nn.Sequential(*layers)
self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.mixer_layers(x)
x = self.patch_expand(x)
return x | 3,618 | 27.054264 | 127 | py |
imaging_MLPs | imaging_MLPs-master/untrained/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GPSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
locality_strength=1., use_local_init=True, grid_size=None):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.locality_strength = locality_strength
self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
self.apply(self._init_weights)
if use_local_init:
self.local_init(locality_strength=locality_strength)
self.current_grid_size = grid_size
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_attention(self, x):
B, N, C = x.shape
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1,-1,1,1)
attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn = attn / attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map = False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:,:,-1]**.5
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self, locality_strength=1.):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 #max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads**.5)
center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1+kernel_size*h2
self.pos_proj.weight.data[position,2] = -1
self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
self.pos_proj.weight.data *= locality_strength
def get_rel_indices(self, ):
H, W = self.current_grid_size
N = H*W
rel_indices = torch.zeros(1, N, N, 3)
indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
indx = indx.repeat(H, H)
indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
indd = indx**2 + indy**2
rel_indices[:,:,:,2] = indd.unsqueeze(0)
rel_indices[:,:,:,1] = indy.unsqueeze(0)
rel_indices[:,:,:,0] = indx.unsqueeze(0)
device = self.v.weight.device
self.rel_indices = rel_indices.to(device)
def forward(self, x):
B, N, C = x.shape
if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
self.get_rel_indices()
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MHSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.apply(self._init_weights)
self.current_grid_size = grid_size
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_attention_map(self, x, return_map = False):
self.get_rel_indices()
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:,:,-1]**.5
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def get_rel_indices(self, ):
H, W = self.current_grid_size
N = H*W
rel_indices = torch.zeros(1, N, N, 3)
indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
indx = indx.repeat(H, H)
indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
indd = indx**2 + indy**2
rel_indices[:,:,:,2] = indd.unsqueeze(0)
rel_indices[:,:,:,1] = indy.unsqueeze(0)
rel_indices[:,:,:,0] = indx.unsqueeze(0)
device = self.qkv.weight.device
self.rel_indices = rel_indices.to(device)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
    """Transformer encoder block.

    Pre-LayerNorm attention (GPSA or plain MHSA) followed by a pre-LayerNorm
    MLP, each wrapped in a residual connection with optional stochastic depth.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # choose the attention flavour: gated positional self-attention or MHSA
        attn_cls = GPSA if use_gpsa else MHSA
        self.attn = attn_cls(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden, act_layer=act_layer, drop=drop)

    def forward(self, x, grid_size):
        # the attention module needs the current (H, W) token grid
        # to build its relative-position indices
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """Image-to-patch embedding via one strided convolution (adapted from timm)."""

    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        # kernel and stride both equal the patch size, so each output pixel
        # is the embedding of one non-overlapping patch
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def forward(self, x):
        return self.proj(x)

    def _init_weights(self, m):
        # truncated-normal init for linear layers, unit affine for layer norms;
        # conv layers keep their default initialization
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
    """Vision Transformer for image-to-image reconstruction (ConViT-style).

    Tokenizes the input with a strided-conv patch embedding, runs a stack of
    transformer blocks (GPSA within ``gpsa_interval``, MHSA elsewhere),
    projects each token back to a flattened pixel patch via ``head``, and
    reassembles the image with :meth:`seq2img`. Variable input sizes are
    handled by bilinearly resizing the positional embedding to the current
    token-grid shape.
    """
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
        """
        Args:
            avrg_img_size: typical input size, int or (H, W); only used to
                size the learned positional embedding.
            patch_size: patch side length, int or (hp, wp).
            in_chans: number of input image channels.
            embed_dim: per-head width; total width is embed_dim * num_heads.
            depth: number of transformer blocks.
            gpsa_interval: 1-based inclusive [first, last] block range that
                uses GPSA; blocks outside it use plain MHSA.
            use_pos_embed: whether to add a learned positional embedding.
        """
        super().__init__()
        self.depth = depth
        embed_dim *= num_heads
        self.num_features = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        # BUGFIX: img_size used to be assigned only for int inputs, so a tuple
        # avrg_img_size raised NameError below; to_2tuple handles both.
        img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i >= gpsa_interval[0] - 1 and i < gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head maps each token back to a flattened in_chans * hp * wp patch
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
        # BUGFIX: this init call was dead code inside seq2img (placed after its
        # return statement); it belongs here, as in the upstream ConViT code.
        self.head.apply(self._init_weights)

    def seq2img(self, x, img_size):
        """
        Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
        output dims: [batch_size, channels, H, W]
        """
        x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
        # lay all patches side by side along one axis, then fold whole rows of
        # patches back into the H axis
        x = x.chunk(x.shape[1], dim=1)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3)
        x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3).squeeze(1)
        return x

    def _init_weights(self, m):
        # truncated-normal init for linear layers, unit affine for layer norms
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self,):
        return {'pos_embed'}

    def get_head(self,):
        return self.head

    def reset_head(self,):
        # BUGFIX: previously referenced bare `in_chans`, which is undefined in
        # this scope and raised NameError on every call.
        self.head = nn.Linear(self.num_features, self.in_chans*self.patch_size[0]*self.patch_size[1])

    def forward_features(self, x, k=None):
        """Patch-embed, add (resized) positional embedding, run all blocks.

        When ``k`` is given, the attention map of block ``k`` is stashed on
        ``self.attention_map`` for inspection.
        """
        x = self.patch_embed(x)
        _, _, H, W = x.shape
        if self.use_pos_embed:
            # resize the learned positional embedding to the current token grid
            pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners=False)
            x = x + pos_embed
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        for u, blk in enumerate(self.blocks):
            x = blk(x, (H, W))
            if k is not None and u == k:
                self.attention_map = blk.attn.get_attention_map(x, return_map=True)
        x = self.norm(x)
        return x

    def forward(self, x, k=None):
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        return x
imaging_MLPs | imaging_MLPs-master/untrained/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    """Wrapper that zero-pads inputs up to a multiple of the wrapped network's
    patch size, runs the network, and crops the output back to the input size."""

    def __init__(self, net):
        super().__init__()
        self.net = net

    def pad(self, x):
        """Symmetrically zero-pad H and W to multiples of the patch size.

        Returns the padded tensor plus the [left, right] and [top, bottom]
        pad amounts needed to undo the padding later.
        """
        _, _, h, w = x.shape
        hp, wp = self.net.patch_size
        extra_w = (wp - w % wp) % wp
        extra_h = (hp - h % hp) % hp
        # split the slack as evenly as possible, extra pixel on the far side
        wpad = [extra_w // 2, extra_w - extra_w // 2]
        hpad = [extra_h // 2, extra_h - extra_h // 2]
        return F.pad(x, wpad + hpad), wpad, hpad

    def unpad(self, x, wpad, hpad):
        """Crop away the padding applied by :meth:`pad`."""
        h_stop = x.shape[-2] - hpad[1]
        w_stop = x.shape[-1] - wpad[1]
        return x[..., hpad[0]:h_stop, wpad[0]:w_stop]

    def forward(self, x, k=None):
        """Pad, run the wrapped network, then crop back to the original size."""
        padded, wpad, hpad = self.pad(x)
        out = self.net(padded, k)
        return self.unpad(out, wpad, hpad)
imaging_MLPs | imaging_MLPs-master/untrained/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int = 1,
        out_chans: int = 1,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # encoder: each level doubles the channel count
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # bottleneck block at the lowest resolution
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # decoder: transpose conv upsamples, conv fuses with the skip connection
        # (input to each up_conv is 2*ch: upsampled features + skip features)
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2
        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        # final level also maps to the requested output channels via a 1x1 conv
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image
        # apply down-sampling layers, saving pre-pool features for the skips
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # apply up-sampling layers, consuming the skip stack in reverse order
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # reflect pad on the right/bottom if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class ConvBlock(nn.Module):
    """Two 3x3 convolutions, each followed by instance normalization,
    LeakyReLU activation and 2D dropout."""

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        # two identical conv->norm->activation->dropout stages; only the
        # first stage's input channel count differs
        ops = []
        for c_in in (in_chans, out_chans):
            ops += [
                nn.Conv2d(c_in, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]
        self.layers = nn.Sequential(*ops)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Map `(N, in_chans, H, W)` to `(N, out_chans, H, W)`."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """One 2x stride-2 transpose convolution followed by instance
    normalization and LeakyReLU activation (doubles H and W)."""

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        upsample = nn.ConvTranspose2d(
            in_chans, out_chans, kernel_size=2, stride=2, bias=False
        )
        self.layers = nn.Sequential(
            upsample,
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Map `(N, in_chans, H, W)` to `(N, out_chans, H*2, W*2)`."""
        return self.layers(image)
imaging_MLPs | imaging_MLPs-master/untrained/networks/__init__.py | from .img2img_mixer import *
from .original_mixer import *
from .unet import *
from .recon_net import ReconNet
from .vit import VisionTransformer
| 146 | 23.5 | 34 | py |
subgraph-counts-hoeffding | subgraph-counts-hoeffding-main/estimate_N.py | #Download the data and compute the lower bound for N from Corollary 5 of our paper.
import sys
import math
import argparse
import pandas as pd
import numpy as np

# CLI: H is the pattern graph, T its spanning tree, G the host graph whose
# degree histogram is read from each CSV file given as a positional argument.
parser=argparse.ArgumentParser()
parser.add_argument("--h", type=int, help="number of vertices in H")
parser.add_argument("--i_T", type=int, help="number of internal vertices in T", default=1)
parser.add_argument("--epsilon", type=float, help="precision required (fraction of E D^{h-1})", default=0.1)
parser.add_argument("--p", type=float, help="1-p is the confidence required", default=0.05)
parser.add_argument("--n_vertices", type=int, help="number of vertices in G", default=0)
parser.add_argument("--min_n", type=int, help="number of vertices in G", default=10000)
parser.add_argument("--O_size", type=int, help="number of vertices in O (Theorem 6)", default=1)
args,filenames = parser.parse_known_args()

# counts how many inputs yield a sample bound N smaller than n ("practical")
n_practical = 0
n_total = 0
n=args.n_vertices
for filename in filenames:
    # each input is a CSV degree histogram with columns (degree, freq)
    try:
        df=pd.read_csv(filename, names=["degree","freq"], skiprows=1, dtype={"degree": int, "freq": np.float32})
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt; consider narrowing
        print(f"Wrong format in file (unknown). Is it a CSV file containing two columns degree and freq?")
        continue
    # n defaults to the total histogram mass unless fixed on the command line
    if args.n_vertices == 0:
        n = df.freq.values.sum()
    else:
        n = args.n_vertices
    if n < args.min_n:
        continue
    # S holds normalized tail sums of D^{h-1}, largest degrees first;
    # the last entry is the full empirical moment E D^{h-1}
    df = df.sort_values("degree", ascending=False).reset_index(drop=True)
    S = (df.degree**(args.h-1) * df.freq).cumsum() / df.freq.sum()
    ED_h_minus_one = S.values[-1]
    #find minimum \Delta such that the empirical distribution of the degree \tilde{D}
    #satisfies \E \tilde{D}^{h-1} \mathbb{I}_{\tilde{D} \ge \Delta} < \epsilon \E \tilde{D}^{h-1}
    Delta = df.degree.values[np.where(S.values > args.epsilon * ED_h_minus_one)[0][0]] + 1
    #use \lambda = \epsilon \E \tilde{D}^{h-1}
    lambd = args.epsilon * ED_h_minus_one / args.i_T
    s = args.epsilon * ED_h_minus_one
    # Hoeffding-style lower bound on the number of samples N (Corollary 5)
    N_hoeffding = max(1,int(math.ceil(0.5 * math.exp(2 * (args.h - args.O_size) * math.log(Delta-1) - 2 * math.log(s)) * math.log(2/args.p))))
    if N_hoeffding < n:
        n_practical += 1
    n_total += 1
    print(f"n = {n} N = {N_hoeffding} E D^{args.h-1} = {round(ED_h_minus_one,1)} max degree = {max(df.degree)} Delta = {Delta} {'practical' if N_hoeffding < n else 'useless'} (unknown)")
#Totals are printed in the end
print(f"h={args.h} n_practical={n_practical} n_total={n_total} ratio={None if n_total==0 else round(n_practical/n_total,2)}")
| 2,501 | 42.894737 | 188 | py |
conker | conker-main/driver.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
import os

from src.parser import Parser
from src.utils import remove_ext
from src.centerfinder import CenterFinder
from src.correlator import Correlator


def main():
    """Drive a full ConKer run.

    Parses CLI arguments, builds the output folder (named after the input
    and randoms catalogs), constructs the Correlator and its three
    CenterFinder instances (randoms background, catalog 0, catalog 1), and
    runs the requested correlation (scan or single separation).
    """
    print('\n\n\nPlease reference original publication arXiv:XXXXXXXXXXXXX '\
        'when using this software for publishing/redistribution.\n\n\n')

    parser = Parser()
    args = parser.parse_args()

    # preps output folder; its name encodes which catalogs/grids were used
    filename1 = remove_ext(args.file1)
    savename = 'out_f1_{}'.format(filename1)
    if args.file0:
        filename0 = remove_ext(args.file0)
        savename += '_f0_{}'.format(filename0)
    else:
        filename0 = filename1
    if args.randoms_file:
        filenameR = remove_ext(args.randoms_file)
        savename += '_fR_{}'.format(filenameR)
    elif args.randoms_grid:
        filenameR = remove_ext(args.randoms_grid)
        savename += '_gR_{}'.format(filenameR)
    else:
        filenameR = filename1
    savename += '/'
    try:
        os.mkdir(savename)
    except FileExistsError:
        # reuse an existing output folder of the same name
        pass

    # sets up correlator object for run
    corr = Correlator(args.order, args.file1, file0=args.file0,
        fileR=args.randoms_file, file_gridR=args.randoms_grid,
        nondiag=args.nondiag, save_randoms=args.save_randoms,
        params_file=args.params_file, printout=args.verbose,
        save=args.save, savename=savename)

    # creates and puts cf object instance for randoms (non-conv background);
    # falls back to the data catalog itself when no randoms file is given
    fileR = args.file1 if not args.randoms_file else args.randoms_file
    cfR = CenterFinder(fileR, args.wtd_randoms,
        args.params_file, args.save, args.verbose,
        factorize=args.factorize_randoms)
    corr.set_cfR(cfR)

    # creates and customizes instance of CenterFinder object 0
    file0 = args.file1 if not args.file0 else args.file0
    cf0 = CenterFinder(file0, args.wtd_input0,
        args.params_file, args.save, args.verbose, kernel_type='ball')
    corr.set_cf0(cf0)

    # creates and customizes instance of CenterFinder object 1
    cf1 = CenterFinder(args.file1, args.wtd_input1,
        args.params_file, args.save, args.verbose)
    corr.set_cf1(cf1)

    # histograms input catalog 0 to get boundaries
    corr.prep_cf0(args)

    # makes the randoms grid with input data boundaries
    # sets Correlator object's randoms_grid attribute
    # normalization: ((P_r * P_ad) / NR) * (ND / NR)
    corr.make_cfR(corr.get_cf0().get_density_grid_edges())

    # makes cf0 with randoms grid from cfR
    corr.make_cf0(args)
    # makes cf1 with randoms grid from cfR
    corr.make_cf1(args)

    # finds custom calib file or falls back to default
    corr.load_calib()

    # runs requested correlation and saves output
    # scan command overrides single command
    if args.scan:
        if args.order == 2 or (args.order>2 and not args.nondiag):
            corr.scan_correlate_diag(args.scan)
        elif args.order > 2 and args.nondiag:
            corr.scan_correlate_nondiag(args.scan)
    else:
        # TODO: integrate higher orders with args from -r1
        corr.single_correlate()


if __name__ == '__main__':
    main()
| 3,468 | 28.649573 | 74 | py |
conker | conker-main/calibrate.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from progress.bar import ChargingBar
from math import sqrt
from scipy.signal import fftconvolve
from src.parser import CalibrationParser
from src.kernel import Kernel
from src.utils import *
import time


def main():
    """Calibrate ConKer's discrete kernel radii against actual pair distances.

    Histograms the randoms catalog onto a cartesian grid, centers a step
    kernel of each requested radius on the grid, and compares the assumed
    (discrete) radius against the actual point-pair distances falling on the
    kernel shell. Saves per-radius means/histograms and a global mean
    separation-error correction to 'calibration/'.
    """
    parser = CalibrationParser()
    args = parser.parse_args()
    if args.scan:
        try:
            assert args.scan[0] < args.scan[1]
        except AssertionError:
            print('AssertionError: scan args have to be in ascending order')
            sys.exit(1)

    print('Loading input data...')
    indir = 'data/'
    outdir = 'calibration/'
    input_file = args.fileR
    params_file = 'params.json'
    filename = remove_ext(input_file)
    # for the main_calib_loop restart mechanic: minimum points required in the
    # kernel's central cell, and maximum number of re-centering attempts
    kernel_center_cell_pts_floor = 5
    restart_limit = 30

    loadname = indir + input_file
    G_ra, G_dec, G_redshift = load_data(loadname)
    # weights don't matter for calibration
    G_weights = np.ones(len(G_ra), dtype=float)
    # gets cosmology and other hyperparameters
    cosmology, grid_spacing, rounding_precision \
        = load_hyperparameters(params_file)
    # calculates lookup tables for fast conversion from r to z and vice versa
    LUT_radii, LUT_redshifts = interpolate_r_z(G_redshift.min(),
        G_redshift.max(), cosmology)
    G_radii = LUT_radii(G_redshift)
    print('Input loaded successfully...')

    print('Initializing calibration...')
    xyzs = np.array(sky2cartesian(G_ra, G_dec, G_redshift, LUT_radii))
    galaxies_cartesian = xyzs.T  # each point is represented by (x, y, z)
    # one bin per grid_spacing along each cartesian axis
    bin_counts_3d = np.array(
        [np.ceil((xyzs[i].max() - xyzs[i].min()) / grid_spacing)
         for i in range(len(xyzs))],
        dtype=int)
    histo_grid, histo_grid_edges = np.histogramdd(
        galaxies_cartesian,
        bins=bin_counts_3d,
        weights=G_weights
    )
    print('Original grid idx ranges: (0,{}) (0,{}) (0,{})'\
        .format(*histo_grid.shape))
    xs, ys, zs = xyzs
    x_edges, y_edges, z_edges = histo_grid_edges

    # default single-radius run; a --scan overrides it below
    rstart, rend, step = 110, 115, 5
    if args.scan:
        rstart, rend = args.scan
        # have to invert scan boundaries
        # because of the optimization for continuous querying
        # see IMPORTANT below
        step = -grid_spacing
        rstart_temp = rend + step
        rend = rstart + step
        rstart = rstart_temp
    # dict will save tuple per assumed distance: (actual,min,max)
    rvsr = {r:(None,None,None) \
        for r in np.arange(rstart, rend, step)}

    def main_calib_loop(move_idxs_by) -> bool:
        # Runs the calibration over all radii with the kernel centered at the
        # grid midpoint shifted by move_idxs_by cells; recurses with a larger
        # shift when the central cell has too few points. Returns False when
        # the restart limit is exhausted.
        nonlocal outdir, filename
        nonlocal grid_spacing, histo_grid
        nonlocal xs, ys, zs
        nonlocal x_edges, y_edges, z_edges
        nonlocal rstart, rend, step
        nonlocal rvsr
        for r in np.arange(rstart, rend, step):
            print('Calibrating for r =', r)
            actual_ds = []
            kernel = Kernel('step', r, grid_spacing, False, False)
            kernel_grid = kernel.get_grid()
            kc_idx = kernel.get_kernel_center()[0]  # kernel center idx
            print('Got kernel grid with shape:', kernel_grid.shape)
            ks = kernel_grid.shape
            f = ks[0] // 2
            # center the kernel-sized box on the (shifted) grid midpoint
            xmid, ymid, zmid = np.array(histo_grid.shape) // 2 + move_idxs_by
            xmini, xmaxi = xmid-f, xmid+f+1
            ymini, ymaxi = ymid-f, ymid+f+1
            zmini, zmaxi = zmid-f, zmid+f+1
            box = histo_grid[xmini:xmaxi, ymini:ymaxi, zmini:zmaxi]
            print('Selected subgrid idx ranges: ({},{}) ({},{}) ({},{})'\
                .format(xmini,xmaxi,ymini,ymaxi,zmini,zmaxi))
            print('Got central subgrid with shape', box.shape)
            print('Number of points in subgrid:', np.sum(box))
            # locates correct cell idx boundaries in histo grid
            xcmin_all, xcmax_all = x_edges[xmini], x_edges[xmaxi]
            ycmin_all, ycmax_all = y_edges[ymini], y_edges[ymaxi]
            zcmin_all, zcmax_all = z_edges[zmini], z_edges[zmaxi]
            # finds all points within this coordinate range
            pidxs_all = np.asarray(
                (xs >= xcmin_all) & (xs < xcmax_all) \
                & (ys >= ycmin_all) & (ys < ycmax_all) \
                & (zs >= zcmin_all) & (zs < zcmax_all)
            ).nonzero()
            # IMPORTANT: only for use in conjunction with descending kernel radii
            # reassigns data coord arrays to just pts within curr bounds
            # much faster querying in future loop iters
            xs, ys, zs = xs[pidxs_all], ys[pidxs_all], zs[pidxs_all]
            # calculates coordinates of kernel center
            kcxmin, kcxmax = x_edges[xmini+kc_idx], x_edges[xmini+kc_idx+1]
            kcymin, kcymax = y_edges[ymini+kc_idx], y_edges[ymini+kc_idx+1]
            kczmin, kczmax = z_edges[zmini+kc_idx], z_edges[zmini+kc_idx+1]
            # finds all points within the central cell of the kernel
            pidxs_kc = np.asarray(
                (xs >= kcxmin) & (xs < kcxmax) \
                & (ys >= kcymin) & (ys < kcymax) \
                & (zs >= kczmin) & (zs < kczmax)
            ).nonzero()
            pts_in_kernel_center_cell = list(zip(xs[pidxs_kc], ys[pidxs_kc], zs[pidxs_kc]))
            # needs to restart someplace else in the catalog if there are not
            # enough points in the kernel's central cell at this configuration
            if (len(pts_in_kernel_center_cell) < kernel_center_cell_pts_floor):
                # allows only restart_limit tries to restart
                if (move_idxs_by < restart_limit):
                    print('Couldn\'t find enough points in center cell, '\
                        'restart nr. {}...'.format(move_idxs_by+1))
                    return main_calib_loop(move_idxs_by+1)
                else:
                    print('Reached restart limit, exiting...')
                    return False
            else:
                print(f"Found '{len(pts_in_kernel_center_cell)}' points in kernel center cell")
            # traverses inscribed surface in kernel grid
            idxs = np.asarray(kernel_grid!=0).nonzero()
            enough = 10**5  # upper bound for nr of pts for calib
            # pretty-prints progress bar
            progbar = ChargingBar('Calculating actual distances...',
                max=len(idxs[0]),
                suffix = '%(percent).1f%% - %(elapsed)ds')
            cut = False
            start = time.time()
            for i, j, k in zip(*idxs):
                # skips if respective cell in histo grid empty
                if box[i,j,k] == 0:
                    progbar.next()
                    continue
                # locates correct cell idx boundaries in histo grid
                xi, yi, zi = xmini+i, ymini+j, zmini+k
                xcmin, xcmax = x_edges[xi], x_edges[xi+1]
                ycmin, ycmax = y_edges[yi], y_edges[yi+1]
                zcmin, zcmax = z_edges[zi], z_edges[zi+1]
                # finds all points within this i,j,k cell
                pidxs = np.asarray(
                    (xs >= xcmin) & (xs < xcmax) \
                    & (ys >= ycmin) & (ys < ycmax) \
                    & (zs >= zcmin) & (zs < zcmax)
                ).nonzero()
                pts_in_cell = list(zip(xs[pidxs], ys[pidxs], zs[pidxs]))
                # registers actual distance vs assumed distance
                assumed_d = r
                for x, y, z in pts_in_cell:
                    for kcx, kcy, kcz in pts_in_kernel_center_cell:
                        actual_d = sqrt((x-kcx)**2 + (y-kcy)**2 + (z-kcz)**2)
                        actual_ds.append(actual_d)
                # can end prematurely if enough pts have been considered
                if len(actual_ds) >= enough:
                    cut = True
                    break
                progbar.next()
            end = time.time()
            progbar.finish()
            if cut:
                print('Enough distances were calculated for calibration...')
            print('Final nr of pts considered for calibration (were on shell):',
                len(actual_ds))
            print('Distances calculated in {} seconds'.format(end-start))

            print('Calculating mean for calibration...')
            actual_ds = np.array(actual_ds)
            mean = np.mean(actual_ds)
            # RMS of the deviations below/above the mean, used as asymmetric
            # error bars in the summary plot
            meanerrs = actual_ds - mean
            lomeanerrs = meanerrs[meanerrs < 0]
            himeanerrs = meanerrs[meanerrs >= 0]
            loerr = sqrt(np.mean(lomeanerrs**2))
            hierr = sqrt(np.mean(himeanerrs**2))
            rvsr[r] = (mean, loerr, hierr)
            print('Mean actual distance:', mean)
            mean = np.array(mean)
            np.save('{}calib_mean_{}_gs_{}_r1_{}.npy'\
                .format(outdir, filename, grid_spacing, r),
                mean)

            print('Histogramming and saving...')
            plt.hist(actual_ds,
                bins=20,
                edgecolor='black',
                label=r'$r_1=$'+str(r)+r'$h^{-1}Mpc$')
            plt.title('ConKer: Actual distance vs. discrete distance')
            plt.xlabel(r'$r$')
            plt.ylabel('Count')
            plt.legend()
            plt.savefig('{}calib_hist_{}_gs_{}_r1_{}.png'\
                .format(outdir, filename, grid_spacing, r),
                dpi=300)
            plt.clf()
        return True

    # this inline func is a facility for restarting the calibration
    # in the case where there's no points in the central cell of the kernel
    # given a particular configuration in space, the default calibration
    # procedure starts in the middle of the survey space
    if(not main_calib_loop(0)):
        print('Calibration procedure ended unsuccessfully: '\
            'couldn\'t find points in kernel center cell')
        return

    # summary over all radii: mean offset between discrete and actual r
    rs = np.array(list(rvsr.keys()))
    vals = np.array(list(rvsr.values()))
    means = vals[:,0]
    loerrs = vals[:,1]
    uperrs = vals[:,2]
    meanerr = np.round(np.mean(rs - means),
        decimals=rounding_precision)
    np.save('{}calib_ref_{}_gs_{}.npy'\
        .format(outdir, filename, grid_spacing),
        meanerr)
    plt.errorbar(rs, means, yerr=[loerrs,uperrs], label='uncalibrated', c='orange')
    plt.errorbar(rs, means+meanerr, label='calibrated', c='cornflowerblue')
    plt.title('ConKer: Actual distances vs. discrete distances')
    plt.xlabel(r'$r_{discrete}$ $[h^{-1}Mpc]$')
    plt.ylabel(r'$r_{actual}$ $[h^{-1}Mpc]$')
    plt.xticks(ticks=rs[::2])
    plt.yticks(ticks=rs)
    plt.grid(linestyle=':')
    plt.legend()
    lo, hi = min(rvsr.keys()), max(rvsr.keys())
    plt.savefig('{}calib_vsplot_{}_gs_{}_r1_{}_{}.png'\
        .format(outdir, filename, grid_spacing, lo, hi),
        dpi=300)
    plt.close()
    print('Mean separation error to {} decimal places:'\
        .format(rounding_precision), meanerr)

    print('Calibration ended successfully...')


if __name__ == '__main__':
    main()
| 10,318 | 30.750769 | 83 | py |
conker | conker-main/src/parser.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
from argparse import ArgumentParser
class Parser(ArgumentParser):
    """Command-line interface for the main ConKer driver.

    Groups the options into: core run configuration (input catalogs,
    correlation order/range), ancillary behaviors (saving, verbosity),
    randoms-background options, and two parallel sets of kernel and
    density-grid options (suffix 1 for the main kernel, suffix 0 for the
    second catalog's kernel).
    """

    def __init__(self):
        super().__init__(description=
            '~~~~~~~~~~~~~~~~~ ( X ) ConKer ( X ) ~~~~~~~~~~~~~~~~~')

        # core conker variables
        self.add_argument('file1', metavar='INPUT_FILE_1', type=str,
            help='Name of .fits file with the input catalog.')
        self.add_argument('-f0', '--file0', type=str, default=None,
            help='Name of .fits file with other input catalog '
            'for cross-correlation. Auto-correlation if ommitted. ')
        self.add_argument('-n', '--order', type=int, default=2,
            help='Correlation order wanted. Has to be >= 2.')
        self.add_argument('--nondiag', action='store_true',
            help='Requests a non-diagonal correlation (all separation combos). '
            'Omitting this requests a correlation on the main diagonal only.')
        self.add_argument('--scan', nargs=2, type=float,
            help='Calculates correlation function from 1st arg (iclusive) '
            'to 2nd arg (exclusive) by step of grid_spacing.')

        # ancillary behaviors
        self.add_argument('-p', '--params_file', type=str, default='params.json',
            help='Sets custom hyperparameters file.')
        self.add_argument('-s', '--save', action='store_true',
            help='Grids and .fits output will be automatically saved to an \'out\' folder.')
        self.add_argument('-sR', '--save_randoms', action='store_true',
            help='Randoms background grid will be saved to output folder.')
        self.add_argument('-v', '--verbose', action='store_true',
            help='The progress of CenterFinder will be printed out to standard output.')
        # self.add_argument('-l', '--plot', action='store_true',
        #     help='A plot of the result (iso, scan only) will be saved to output.')

        # these define behavior of randoms cf
        # (file-based and premade-grid randoms are mutually exclusive)
        randoms = self.add_mutually_exclusive_group()
        randoms.add_argument('-fR', '--randoms_file', type=str, default=None,
            help='Name of .fits file with randoms catalog for background.')
        randoms.add_argument('-gR', '--randoms_grid', type=str, default=None,
            help='Name of .npy file containing premade randoms backround grid.')
        self.add_argument('-wR', '--wtd_randoms', action='store_true',
            help='Randoms catalog will be interpreted as having weights on 4th col.')
        self.add_argument('--factorize_randoms', action='store_true',
            help='Randoms will just be histogrammed from the randoms catalog.')

        # these define 1st kernel behavior
        self.add_argument('-r1', '--kernel_radius1', type=float, help='Sets kernel radius.')
        self.add_argument('--show_kernel1', action='store_true', help='Shows 1D kernel plot.')
        kernel_types1 = self.add_mutually_exclusive_group()
        kernel_types1.add_argument('-e1', '--step_kernel1', nargs='*', type=float,
            help='Fits a step function to the kernel at kernel radius.')
        kernel_types1.add_argument('-g1', '--gaussian_kernel1', nargs=1, type=float,
            help='Fits a gaussian function to the kernel at kernel radius.')
        kernel_types1.add_argument('-a1', '--wavelet_kernel1', nargs=1, type=float,
            help='Fits a wavelet function to the kernel at kernel radius.')
        kernel_types1.add_argument('-u1', '--custom_kernel1', nargs=1, type=str,
            help='Fits given custom array to kernel radially.')
        kernel_types1.add_argument('-b1', '--ball_kernel1', action='store_true',
            help='Makes a filled sphere of radius kernel_radius.')

        # these define behavior of 1st density grid
        self.add_argument('-t1', '--vote_threshold1', type=float,
            help='Centers with number of votes smaller than given argument '
            'will be discarded from .fits output.')
        self.add_argument('-w1', '--wtd_input1', action='store_true',
            help='CenterFinder will try to read a fourth column from input data '
            'and interpret said values as weights.')
        con_or_over1 = self.add_mutually_exclusive_group()
        con_or_over1.add_argument('-c1', '--density_contrast1', nargs='*',
            help='CenterFinder will subtract the background from the galaxy '
            'density grid before voting. It will set negative weights to 0 if '
            'anything is entered after -c.')
        con_or_over1.add_argument('-o1', '--overdensity1', action='store_true',
            help='CenterFinder will subtract average density from the galaxy '
            'density grid before voting.')

        # these define 0th kernel behavior (mirrors the 1-suffixed set above)
        self.add_argument('-r0', '--kernel_radius0', type=float, help='Sets kernel radius.')
        self.add_argument('--show_kernel0', action='store_true', help='Shows 1D kernel plot.')
        kernel_types0 = self.add_mutually_exclusive_group()
        kernel_types0.add_argument('-e0', '--step_kernel0', nargs='*', type=float,
            help='Fits a step function to the kernel at kernel radius.')
        kernel_types0.add_argument('-g0', '--gaussian_kernel0', nargs=1, type=float,
            help='Fits a gaussian function to the kernel at kernel radius.')
        kernel_types0.add_argument('-a0', '--wavelet_kernel0', nargs=1, type=float,
            help='Fits a wavelet function to the kernel at kernel radius.')
        kernel_types0.add_argument('-u0', '--custom_kernel0', nargs=1, type=str,
            help='Fits given custom array to kernel radially.')
        kernel_types0.add_argument('-b0', '--ball_kernel0', action='store_true',
            help='Makes a filled sphere of radius kernel_radius.')

        # these define behavior of 0th density grid
        self.add_argument('-t0', '--vote_threshold0', type=float,
            help='Centers with number of votes smaller than given argument '
            'will be discarded from .fits output.')
        self.add_argument('-w0', '--wtd_input0', action='store_true',
            help='CenterFinder will try to read a fourth column from input data '
            'and interpret said values as weights.')
        con_or_over0 = self.add_mutually_exclusive_group()
        con_or_over0.add_argument('-c0', '--density_contrast0', nargs='*',
            help='CenterFinder will subtract the background from the galaxy '
            'density grid before voting. It will set negative weights to 0 if '
            'anything is entered after -c.')
        con_or_over0.add_argument('-o0', '--overdensity0', action='store_true',
            help='CenterFinder will subtract average density from the galaxy '
            'density grid before voting.')
class CalibrationParser(ArgumentParser):
	"""Argument parser for the kernel-calibration step.

	Takes a randoms catalog (positional) plus an optional separation
	range to calibrate over.
	"""

	def __init__(self):
		super().__init__()
		# positional: randoms catalog, looked up inside 'data/'
		self.add_argument(
			'fileR', metavar='RANDOMS_FILE', type=str,
			help='Name of .fits catalog in \'data\' with randoms to be '
				'used in the calibration procedure.')
		# optional: [inclusive, exclusive) separation range to scan
		self.add_argument(
			'--scan', nargs=2, type=float,
			help='Calibrate over given separation range. '
				'1st arg inclusive, 2nd arg exclusive.')
| 7,093 | 46.932432 | 88 | py |
conker | conker-main/src/centerfinder.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
import time
import os
import json
import numpy as np
from math import inf
from astropy.io import fits
from scipy.signal import fftconvolve
from .utils import *
from .kernel import Kernel
class CenterFinder:
	def __init__(
		self,
		galaxy_file: str,
		wtd: bool,
		params_file: str,
		save: bool,
		printout: bool,
		kernel_radius: float = 110.,
		kernel_type: str = 'step',
		kernel_args: list = [],
		vote_threshold: float = -inf,
		factorize: bool = False,
		):
		"""Sets up a CenterFinder run for one galaxy catalog.

		Args:
			galaxy_file: name of the .fits catalog inside 'data/'.
			wtd: if True, a 4th column of per-galaxy weights is read.
			params_file: json file with cosmology and grid hyperparameters.
			save: if True, create an output folder and save large grids.
			printout: if True, print progress diagnostics.
			kernel_radius: kernel radius (h-1Mpc assumed from usage).
			kernel_type: 'step' | 'gaussian' | 'wavelet' | 'custom' | 'ball'.
			kernel_args: extra positional args forwarded to Kernel.
				NOTE(review): mutable default argument — confirm no caller
				mutates the list it receives.
			vote_threshold: minimum votes for a center to be kept.
			factorize: if True, build the randoms grid by factorizing the
				density grid into angular and radial projections.
		"""
		self.kernel_radius = kernel_radius
		self.kernel_type = kernel_type
		self.kernel_args = kernel_args
		self.show_kernel = False
		self.vote_threshold = vote_threshold
		self.weighted = wtd
		self.factorize_randoms = factorize
		self.printout = printout
		self.filename = remove_ext(galaxy_file)
		self.loadname = 'data/' + galaxy_file
		self.save = save
		self.savename = f'out_cf_{self.filename}/'
		if self.save:
			try:
				os.mkdir(self.savename)
			except FileExistsError:
				pass
		# loads galaxy data arrays
		if not self.weighted:
			self.G_ra, self.G_dec, self.G_redshift = load_data(self.loadname)
			# unweighted catalogs get unit weights so the two paths converge
			self.G_weights = np.ones(len(self.G_ra), dtype=float)
		else:
			self.G_ra, self.G_dec, self.G_redshift, self.G_weights \
				= load_data_weighted(self.loadname)
		# gets cosmology and other hyperparameters
		self.cosmology, self.grid_spacing, self.rounding_precision \
			= load_hyperparameters(params_file)
		# calculates lookup tables for fast conversion from r to z and vice versa
		self.LUT_radii, self.LUT_redshifts = interpolate_r_z(self.G_redshift.min(),
			self.G_redshift.max(), self.cosmology)
		self.G_radii = self.LUT_radii(self.G_redshift)
		# grids populated later by make_grids() / make_convolved_grids()
		self.randoms_grid: np.ndarray = None
		self.density_grid: np.ndarray = None
		self.density_grid_edges = None
		self.kernel: Kernel = None
		self.background_grid: np.ndarray = None
		self.centers_grid: np.ndarray = None
def __str__(self):
return 'CenterFinder object\n'\
f'Galaxy data file: {self.filename}\n'\
f'Kernel radius: {self.kernel_radius}\n'\
f'Vote threshold: {self.vote_threshold}\n'\
f'RA range: [{self.G_ra.min()}, {self.G_ra.max()}]\n'\
f'DEC range: [{self.G_dec.min()}, {self.G_dec.max()}]\n'\
f'Z range: [{self.G_redshift.min()}, {self.G_redshift.max()}]'
	# --- simple accessors / mutators ---------------------------------
	def set_kernel_radius(self, kr: float):
		# radius used by the next make_convolved_grids() call
		self.kernel_radius = kr
	def get_kernel_r_idx_units(self):
		# kernel radius in grid-index units (delegates to the Kernel object)
		return self.kernel.kernel_r_idx_units
	def set_kernel_type(self, kt: str, args = None):
		# kt: 'step' | 'gaussian' | 'wavelet' | 'custom' | 'ball'
		self.kernel_type = kt
		self.kernel_args = args
	def set_show_kernel(self, sk: bool):
		self.show_kernel = sk
	def set_vote_threshold(self, vt: float):
		self.vote_threshold = vt
	def set_density_grid(self, dg):
		self.density_grid = dg
	def get_density_grid(self):
		return self.density_grid
	def set_density_grid_edges(self, dge):
		# edges as returned by np.histogramdd; lets two finders share binning
		self.density_grid_edges = dge
	def get_density_grid_edges(self):
		return self.density_grid_edges
	def set_randoms_grid(self, rg):
		self.randoms_grid = rg
	def get_randoms_grid(self):
		return self.randoms_grid
	def set_kernel(self, k):
		self.kernel = k
	def get_kernel(self):
		return self.kernel
	def set_centers_grid(self, cg):
		self.centers_grid = cg
	def get_centers_grid(self):
		return self.centers_grid
	def set_background_grid(self, bg):
		self.background_grid = bg
	def get_background_grid(self):
		return self.background_grid
	def make_histo_grid(self):
		"""Histograms the galaxy catalog onto a cartesian grid.

		Converts sky coordinates to cartesian, bins the galaxies (with
		their weights) into self.density_grid, and records the bin edges.
		Returns the sum of all weights (total effective galaxy count).
		"""
		xyzs = sky2cartesian(self.G_ra, self.G_dec, self.G_redshift, self.LUT_radii) # galaxy x, y and z coords
		self.galaxies_cartesian = np.array(xyzs).T # each galaxy is represented by (x, y, z)
		# for cf1 and cf0 types in conker
		if not self.density_grid_edges:
			# gets the 3d histogram (density_grid) and the grid bin coordintes in cartesian (grid_edges)
			bin_counts_3d = np.array([np.ceil((xyzs[i].max() - xyzs[i].min()) / self.grid_spacing)
				for i in range(len(xyzs))], dtype=int)
			# histograms the data points in real space with given weights
			self.density_grid, self.density_grid_edges = np.histogramdd(
				self.galaxies_cartesian,
				bins=bin_counts_3d,
				weights=self.G_weights
			)
		# for cfR, cf1 (cf1 w/ x-corr, diff bounds only) types in conker
		else:
			# reuse pre-set edges so this histogram is binned identically
			# to a previously built one
			self.density_grid, self.density_grid_edges = np.histogramdd(
				self.galaxies_cartesian,
				bins=self.density_grid_edges,
				weights=self.G_weights
			)
		if self.printout:
			print('Histogramming completed successfully...')
			print('Histogram grid shape:', self.density_grid.shape)
		return np.sum(self.density_grid)
def make_randoms_grid(self):
if not self.randoms_grid:
if self.factorize_randoms:
self.randoms_grid = self._project_and_sample(
self.density_grid,
self.density_grid_edges
)
else:
self.randoms_grid = self.density_grid
def make_density_grid(self, dencon, overden):
# density contrast: subtracts randoms grid (non-conv background)
if dencon[0]:
self.density_grid -= self.randoms_grid
# keep or discard negative valued weights
# dencon[1] set to True means keep negative weights
if not dencon[1]:
if self.printout:
print('Discarding all negative weights in density grid...')
self.density_grid[self.density_grid < 0.] = 0.
if self.printout:
print('Background subtraction completed successfully...')
# calculates avg density of all nonempty grid cells of the
# weighted density field and subtracts it from the density field
elif overden:
denavg = np.average(self.density_grid[self.density_grid!=0])
self.density_grid[self.density_grid!=0] -= denavg
self.density_grid[self.density_grid!=0] /= denavg
if self.printout:
print('Overdensity calculation completed successfully...')
if self.printout:
print('Minimum and maximum values of density field grid cells:\n',
'[{}, {}]'.format(self.density_grid.min(), self.density_grid.max()))
	def _convolve_density_grid(self):
		"""
		Convolves density grid and sets it as centers grid (signal grid).

		Builds the Kernel at the current kernel_radius, FFT-convolves it
		over the density grid ('same' mode keeps the grid shape) and
		rounds to the configured precision.  Saves the full grid when
		self.save is set.
		"""
		# makes the kernel for scanning over the density grid
		self.kernel = Kernel(self.kernel_type, self.kernel_radius, self.grid_spacing,
			self.printout, self.show_kernel, *self.kernel_args)
		# this scans the kernel over the whole volume of the galaxy density grid
		# calculates the tensor inner product of the two at each step
		# and finally stores this value as the number of voters per that bin in the centers grid
		self.centers_grid = np.round(
			fftconvolve(self.density_grid, self.kernel.get_grid(), mode='same'),
			decimals = self.rounding_precision)
		if self.printout:
			print('Convolution of density grid completed successfully...')
			print('Signal grid shape:', self.centers_grid.shape)
			print('Maximum value per single bin in signal grid W:', self.centers_grid.max())
			print('Minimum value per single bin in signal grid W:', self.centers_grid.min())
		# save whole grid without a vote cut
		if self.save:
			np.save(self.savename + f'convolved_density_grid_r_{self.kernel_radius}'
				f'_t_{self.vote_threshold}.npy', self.centers_grid)
	def _convolve_randoms_grid(self):
		"""
		Convolves randoms grid and sets it as background grid.

		Reuses the Kernel built by _convolve_density_grid, so that method
		must have run first.
		"""
		# needed for background in conker
		self.background_grid = np.round(
			fftconvolve(self.randoms_grid, self.kernel.get_grid(), mode='same'),
			decimals = self.rounding_precision)
		if self.printout:
			print('Convolution of randoms grid completed successfully...')
			print('Background grid shape:', self.background_grid.shape)
			print('Maximum value per single bin in background grid B:', self.background_grid.max())
			print('Minimum value per single bin in background grid B:', self.background_grid.min())
		# save whole grid without a vote cut
		if self.save:
			np.save(self.savename + f'background_grid_r_{self.kernel_radius}'
				f'_t_{self.vote_threshold}.npy', self.background_grid)
	def _project_and_sample(self, grid: np.ndarray, grid_edges: list) -> np.ndarray:
		"""Factorizes the observed grid into an expected (randoms) grid.

		Projects the observed weights onto the angular (alpha, delta)
		plane and the radial direction separately, then resamples the
		product of the two projections back onto the cartesian grid.
		The result is normalized by the total weight and corrected by the
		cartesian-to-sky bin volume ratio.

		Args:
			grid: observed weighted counts, shape (Nx, Ny, Nz).
			grid_edges: per-axis bin edges as returned by np.histogramdd.
		Returns:
			Expected counts grid with the same shape as 'grid'.
		"""
		# TODO: these are unnecessary, remove and reference self's attribute
		bin_centers_edges_xs, bin_centers_edges_ys, bin_centers_edges_zs = \
			np.array([(grid_edges[i][:-1] + grid_edges[i][1:]) / 2 for i in range(len(grid_edges))])
		# TODO: remove, unnecessary
		# if self.save:
		# 	np.save(self.savename + '_xbins.npy', bin_centers_edges_xs)
		# 	np.save(self.savename + '_ybins.npy', bin_centers_edges_ys)
		# 	np.save(self.savename + '_zbins.npy', bin_centers_edges_zs)
		# cartesian coordinates of every bin center, in x-major order
		bin_centers_xs, bin_centers_ys, bin_centers_zs = np.array([(x, y, z)
			for x in bin_centers_edges_xs
			for y in bin_centers_edges_ys
			for z in bin_centers_edges_zs]).T
		del bin_centers_edges_xs, bin_centers_edges_ys, bin_centers_edges_zs
		if self.printout:
			print('Number of bin centers in cartesian coordinates:', len(bin_centers_xs))
		"""
		Why can we be sure that it is okay to interpolate the radii
		and redshift values for these bin centers coordinates?
		Because we know that the range of values of the bin centers
		is exactly in between the min and the max of the grid bin
		edges x, y, z. The radii come from the 3d euclidian distance,
		which preserves this relationship (convex function of x,y,z),
		and thus it is fine to use the beforehand-calculated interpolation
		lookup table to find the redshifts from the radii.
		"""
		bin_centers_ra, bin_centers_dec, _, bin_centers_radii = \
			cartesian2sky(bin_centers_xs,
				bin_centers_ys,
				bin_centers_zs,
				self.LUT_redshifts,
				self.G_ra.min(),
				self.G_ra.max())
		del bin_centers_xs, bin_centers_ys, bin_centers_zs
		if self.printout:
			print('Number of bin centers in sky coordinates:', len(bin_centers_ra))
		# sum total of weights
		N_tot = np.sum(grid)
		if self.printout:
			print('Sum total of weights:', N_tot)
		start = time.time()
		# get volume adjustment grid, the differentials in sky coordinate dimensions
		# and the number of bins in each dimension
		vol_adjust_ratio_grid, d_r, d_alpha, d_delta, N_bins_r, N_bins_alpha, N_bins_delta = \
			self._volume_adjustment(bin_centers_radii, bin_centers_ra, bin_centers_dec, grid.shape)
		end = time.time()
		print(f'\nvolume_adjustment took time: {end-start} seconds\n')
		start = time.time()
		# alpha-delta and z counts
		N_bins_x, N_bins_y, N_bins_z = grid.shape[0], grid.shape[1], grid.shape[2]
		sky_coords_grid_shape = (N_bins_x, N_bins_y, N_bins_z, 3) # need to store a triple at each grid bin
		sky_coords_grid = np.array(list(zip(bin_centers_ra, bin_centers_dec,
			bin_centers_radii))).reshape(sky_coords_grid_shape)
		if self.printout:
			print('Shape of grid containing sky coordinates of observed grid\'s bin centers:',
				sky_coords_grid.shape)
		end = time.time()
		print(f'\ncreating sky_coords_grid took time: {end-start} seconds\n')
		start = time.time()
		# getting some variables ready for the projection step
		alpha_min = bin_centers_ra.min()
		d_alpha = np.rad2deg(d_alpha)
		delta_min = bin_centers_dec.min()
		d_delta = np.rad2deg(d_delta)
		r_min = bin_centers_radii.min()
		del bin_centers_ra, bin_centers_dec, bin_centers_radii
		# vectorial computation of the sky indices
		sky_coords_grid[:, :, :, 0] = (sky_coords_grid[:, :, :, 0] - alpha_min) // d_alpha
		sky_coords_grid[:, :, :, 1] = (sky_coords_grid[:, :, :, 1] - delta_min) // d_delta
		sky_coords_grid[:, :, :, 2] = (sky_coords_grid[:, :, :, 2] - r_min) // d_r
		sky_coords_grid = sky_coords_grid.astype(int)
		# TODO: the condition here should be >= rather than ==
		# the following fixes any indices that lie beyond the outer
		# walls of the sky grid by pulling them to the wall
		sky_coords_grid[:, :, :, 0][sky_coords_grid[:, :, :, 0] == N_bins_alpha] = N_bins_alpha - 1
		sky_coords_grid[:, :, :, 1][sky_coords_grid[:, :, :, 1] == N_bins_delta] = N_bins_delta - 1
		sky_coords_grid[:, :, :, 2][sky_coords_grid[:, :, :, 2] == N_bins_r] = N_bins_r - 1
		end = time.time()
		print(f'\nmodifying sky_coords_grid took time: {end-start} seconds\n')
		start = time.time()
		alpha_delta_grid, r_grid = self._alpha_delta_r_projections_from_grid(grid,
			N_bins_x, N_bins_y, N_bins_z, sky_coords_grid, N_bins_alpha, N_bins_delta, N_bins_r)
		end = time.time()
		print(f'\nalpha_delta_r_projections took time: {end-start} seconds\n')
		if self.printout:
			print('Shape of alpha-delta grid:', alpha_delta_grid.shape)
			print('Shape of r grid:', r_grid.shape)
			print('Maximum value per single bin in alpha-delta grid:', alpha_delta_grid.max())
			print('Minimum value per single bin in alpha-delta grid:', alpha_delta_grid.min())
			print('Maximum value per single bin in r grid:', r_grid.max())
			print('Minimum value per single bin in r grid:', r_grid.min())
			print('N_tot_observed = N_tot_alpha_delta = N_tot_r:',
				N_tot == np.sum(alpha_delta_grid) == np.sum(r_grid))
		start = time.time()
		# expected weight per cell: angular projection x radial projection,
		# looked up through each cell's sky-bin indices
		i = np.arange(N_bins_x)[:,None,None]
		j = np.arange(N_bins_y)[None,:,None]
		k = np.arange(N_bins_z)[None,None,:]
		randoms_grid = alpha_delta_grid[sky_coords_grid[i,j,k,0], sky_coords_grid[i,j,k,1]] \
			* r_grid[sky_coords_grid[i,j,k,2]]
		end = time.time()
		print(f'\nrandoms_grid took time: {end-start} seconds\n')
		randoms_grid /= N_tot # normalization
		randoms_grid *= vol_adjust_ratio_grid # volume adjustment
		if self.printout:
			print('Randoms grid shape:', randoms_grid.shape)
			print('Maximum value in randoms grid bin:', randoms_grid.max())
			print('Minimum value in randoms grid bin:', randoms_grid.min())
		if self.save:
			np.save(self.savename + "_randoms_grid.npy", randoms_grid)
		return randoms_grid
def _volume_adjustment(self, bin_centers_radii: np.array, bin_centers_ra: np.array,
bin_centers_dec: np.array, observed_grid_shape: tuple):
# radius
mid_r = (bin_centers_radii.max() + bin_centers_radii.min()) / 2
delta_r = bin_centers_radii.max() - bin_centers_radii.min()
N_bins_r = int(np.ceil(delta_r / self.grid_spacing))
d_r = self.grid_spacing
r_sqr = bin_centers_radii ** 2
# alpha
delta_alpha = np.deg2rad(bin_centers_ra.max() - bin_centers_ra.min())
N_bins_alpha = int(np.ceil((delta_alpha * mid_r) / self.grid_spacing))
d_alpha = delta_alpha / N_bins_alpha
# delta
delta_delta = np.deg2rad(bin_centers_dec.max() - bin_centers_dec.min())
N_bins_delta = int(np.ceil((delta_delta * mid_r) / self.grid_spacing))
d_delta = delta_delta / N_bins_delta
cos_delta = np.cos(np.deg2rad(bin_centers_dec))
# angular volume differential
dV_ang = d_alpha * cos_delta * d_delta * r_sqr * d_r
# euclidean volume differential
dV_xyz = self.grid_spacing ** 3
# volume adjustment ratio grid; contains the volume adjustment ratio
# per each bin in the expected grid
vol_adjust_ratio_grid = (dV_xyz / dV_ang).reshape(observed_grid_shape)
if self.printout:
print('Number of bins in r:', N_bins_r)
print('Number of bins in alpha:', N_bins_alpha)
print('Number of bins in delta:', N_bins_delta)
print('Volume adjustment ratio grid shape:', vol_adjust_ratio_grid.shape)
return vol_adjust_ratio_grid, d_r, d_alpha, d_delta, N_bins_r, N_bins_alpha, N_bins_delta
def _alpha_delta_r_projections_from_grid(self,
density_grid: np.ndarray,
N_bins_x: int, N_bins_y: int, N_bins_z: int,
sky_coords_grid: np.ndarray,
N_bins_alpha: int, N_bins_delta: int, N_bins_r: int) \
-> (np.ndarray, np.ndarray):
alpha_delta_grid, _, _ = np.histogram2d(sky_coords_grid[:,:,:,0].ravel(),
sky_coords_grid[:,:,:,1].ravel(),
bins=(N_bins_alpha, N_bins_delta),
weights=density_grid.ravel())
r_grid, _ = np.histogram(sky_coords_grid[:,:,:,2].ravel(),
bins=N_bins_r,
weights=density_grid.ravel())
return alpha_delta_grid, r_grid
def cleanup(self):
del self.centers_grid
del self.background_grid
del self.kernel_radius
	def make_grids(self,
		dencon: (bool,bool) = (False, False),
		overden: bool = False
		):
		"""Creates density and randoms grids. """
		# order matters: the histogram fills density_grid, the randoms
		# grid may be factorized from it, and the contrast/overdensity
		# step then modifies density_grid in place
		self.make_histo_grid()
		self.make_randoms_grid()
		self.make_density_grid(dencon, overden)
	def make_convolved_grids(self):
		"""Creates convolved density (signal) and background grids. """
		# signal first: it builds the Kernel the background pass reuses
		self._convolve_density_grid()
		self._convolve_randoms_grid()
def find_centers(self, dencon: bool, overden: bool, cleanup: bool = True):
"""
Identifies BAO centers by applying the voting procedure.
Applies vote threshold and saves the found centers list as .fits catalog.
"""
if self.printout:
print(self)
self._make_grids(dencon=dencon, overden=overden)
self._convolve_density_grid()
# TODO: clean this up
# self.centers_grid[self.centers_grid < self.vote_threshold] = 0
self.centers_indices = np.asarray(self.centers_grid >= self.vote_threshold).nonzero()
self.C_weights = self.centers_grid[self.centers_indices]
if self.printout:
precut = len(self.centers_grid[self.centers_grid!=0])
postcut = len(self.C_weights)
print('Number of found centers before vote cut:', precut)
print('Number of found centers after vote cut:', postcut)
if cleanup:
delattr(self, 'centers_grid')
# calculates center coords to be exactly at the center of their respective bins
centers_bin_coords = np.array([(self.density_grid_edges[i][:-1] \
+ self.density_grid_edges[i][1:]) / 2
for i in range(len(self.density_grid_edges))])
if cleanup:
delattr(self, 'density_grid_edges')
C_xyzs = np.array([centers_bin_coords[i][self.centers_indices[i]] \
for i in range(len(self.centers_indices))])
self.C_ra, self.C_dec, self.C_redshift, _ = cartesian2sky(*C_xyzs, self.LUT_redshifts,
self.G_ra.min(), self.G_ra.max())
# outputs centers catalog in skycoords+weights to out folder
if self.kernel_type=='ball':
savename = self.savename + f'ball_found_centers_r_{self.kernel_radius}_cut_{self.vote_threshold}.fits'
else:
savename = self.savename + f'found_centers_r_{self.kernel_radius}_cut_{self.vote_threshold}.fits'
save_data_weighted(savename, self.C_ra, self.C_dec, self.C_redshift, self.C_weights)
| 18,655 | 32.67509 | 105 | py |
conker | conker-main/src/utils.py | """
Copyright (C) 2020 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
import json
import numpy as np
from astropy.io import fits
from scipy import integrate
from scipy.interpolate import InterpolatedUnivariateSpline
def remove_ext(s: str):
	"""Strips the final extension (e.g. '.fits') from a filename.

	Dots earlier in the name are preserved; a name with no dot maps to
	the empty string, matching the original join-based behavior.
	"""
	head, _, _ = s.rpartition('.')
	return head
def load_hyperparameters(params_file: str):
	"""Reads cosmology and gridding hyperparameters from a json file.

	Returns ((h, c/H0, Omega_M, Omega_K, Omega_L), grid_spacing,
	rounding_precision).  Omega_L defaults to the flat-universe value
	1 - Omega_M - Omega_K when absent from the file.
	"""
	with open(params_file, 'r') as handle:
		hp = json.load(handle)
	H0 = hp['H0']			# km/s/Mpc
	omega_m = hp['Omega_M']
	omega_k = hp['Omega_K']
	cosmology = (
		H0 / 100.,						# little h
		hp['c'] / H0,					# Hubble distance c/H0, c in km/s
		omega_m,
		omega_k,
		hp.get('Omega_L', 1 - omega_m - omega_k),
	)
	# grid_spacing in h-1Mpc
	return cosmology, hp['grid_spacing'], hp['rounding_precision']
def load_data(filename: str) -> (np.recarray,)*3:
	"""Reads the (ra, dec, z) columns from the first table HDU of a .fits catalog."""
	with fits.open(filename) as hdus:
		table = hdus[1].data
		return table['ra'], table['dec'], table['z']
def load_data_weighted(filename: str) -> (np.recarray,)*4:
	"""Reads the (ra, dec, z, wts) columns from the first table HDU of a .fits catalog."""
	with fits.open(filename) as hdus:
		table = hdus[1].data
		return table['ra'], table['dec'], table['z'], table['wts']
def save_data(filename: str, ra: np.array, dec: np.array, z: np.array):
	"""Writes an unweighted (ra, dec, z) catalog to a .fits binary table."""
	# 'E' is the fits format for single-precision floating point
	columns = [fits.Column(name=label, array=values, format='E')
		for label, values in (('ra', ra), ('dec', dec), ('z', z))]
	fits.BinTableHDU.from_columns(columns).writeto(filename, overwrite=True)
def save_data_weighted(filename: str, ra: np.array, dec: np.array, z: np.array, w: np.array):
	"""Writes a weighted (ra, dec, z, wts) catalog to a .fits binary table."""
	# 'E' is the fits format for single-precision floating point
	columns = [fits.Column(name=label, array=values, format='E')
		for label, values in (('ra', ra), ('dec', dec), ('z', z), ('wts', w))]
	fits.BinTableHDU.from_columns(columns).writeto(filename, overwrite=True)
def z2r(z: np.array, cosmology: tuple) -> float:
	"""Comoving radial distance (h-1Mpc) for an observed redshift z."""
	h_, c_over_H0, Omega_M, Omega_K, Omega_L = cosmology

	def inv_E(z_):
		# 1/E(z): dimensionless inverse Hubble parameter
		return (Omega_M * (1 + z_) ** 3 + Omega_K * (1 + z_) ** 2 + Omega_L) ** -0.5

	# integrate 1/E from 0 to z; the factor of h converts Mpc -> h-1Mpc
	return h_ * c_over_H0 * integrate.quad(inv_E, 0, z)[0]
def interpolate_r_z(redshift_min: float, redshift_max: float, cosmology: tuple):
	"""Builds spline lookup tables between redshift and comoving radius.

	Returns (LUT_radii, LUT_redshifts): the first maps z -> r, the
	second is its inverse r -> z, both valid on the sampled range.
	"""
	# TODO: automate getting the number of spacings instead of setting it to 100
	z_samples = np.linspace(redshift_min, redshift_max, 100)
	r_samples = np.array([z2r(z, cosmology) for z in z_samples])
	forward = InterpolatedUnivariateSpline(z_samples, r_samples)
	inverse = InterpolatedUnivariateSpline(r_samples, z_samples)
	return forward, inverse
def sky2cartesian(ra: np.array, dec: np.array, redshift: np.array, LUT_radii) \
	-> (np.array, np.array, np.array):
	"""Converts (ra, dec, z) sky coordinates to cartesian (x, y, z).

	Angles are in degrees; radii come from the LUT_radii spline.  Since
	polar angle = pi/2 - declination: sin(polar) = cos(dec) and
	cos(polar) = sin(dec).
	"""
	radii = np.array([LUT_radii(z) for z in redshift])
	ra_rad, dec_rad = np.deg2rad(ra), np.deg2rad(dec)
	plane_proj = radii * np.cos(dec_rad)  # projection onto the equatorial plane
	return plane_proj * np.cos(ra_rad), plane_proj * np.sin(ra_rad), radii * np.sin(dec_rad)
def cartesian2sky(xs: np.array, ys: np.array, zs: np.array, LUT_redshifts,
	ramin: float, ramax: float) -> (np.array,)*4:
	"""Converts cartesian (x, y, z) back to (ra, dec, z, r) sky coordinates.

	ramin/ramax describe the survey's RA footprint and are used to shift
	the arctan-derived RA into the footprint's quadrant.
	"""
	radii = (xs ** 2 + ys ** 2 + zs ** 2) ** 0.5
	redshift = np.array(LUT_redshifts(radii))
	dec = np.rad2deg(np.arcsin(zs / radii))
	# TODO: fix this hack
	# snap the footprint's midpoint to a multiple of 90 degrees and use
	# it to move arctan's (-90, 90) output into the right quadrant
	quadrant_shift = 90 * np.round((ramin + ramax) / 2 // 90)
	ra = np.rad2deg(np.arctan(ys / xs)) + quadrant_shift
	return ra, dec, redshift, radii
| 4,645 | 34.19697 | 93 | py |
conker | conker-main/src/correlator.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
import sys, time
import numpy as np
import matplotlib.pyplot as plt
from .centerfinder import CenterFinder
from .utils import remove_ext
class Correlator:
	def __init__(
		self,
		order: int,
		file1: str,
		file0: str = None,
		fileR: str = None,
		file_gridR: str = None,
		nondiag: bool = False,
		params_file: str = 'params.json',
		save: bool = False,
		save_randoms = False,
		savename: str = None,
		printout: bool = False
		):
		"""Sets up an n-point correlation run.

		Args:
			order: correlation order n of the npcf; must be >= 2.
			file1: data catalog scanned with kernel-1.
			file0: optional second catalog; its presence switches the run
				from auto- to cross-correlation.
			fileR: optional randoms catalog used to build the background.
			file_gridR: optional pre-computed randoms grid to load instead.
			nondiag: compute the non-diagonal correlation function
				(disallowed for order 2).
			params_file: json file with cosmology/grid hyperparameters.
			save: save intermediate W/B grids.
			save_randoms: save the normalized randoms grid.
			savename: output folder prefix.
			printout: print progress diagnostics.

		Exits the process (sys.exit(1)) on an invalid order/nondiag combo.
		"""
		try:
			assert order >= 2
		except AssertionError:
			print('AssertionError: '\
				f'Correlation order has to be >= 2, was given {order}')
			sys.exit(1)
		try:
			assert not ((order == 2) and nondiag)
		except AssertionError:
			print('AssertionError: '\
				f'Can\'t have a nondiagonal correlation of order 2')
			sys.exit(1)
		self.order = order
		self.file1 = file1
		# auto-correlation on default
		self.type = 'auto'
		# cross-correlation if file0 provided
		if file0:
			self.type = 'cross'
			self.file0 = file0
		# makes background from randoms catalog
		self.fileR = fileR
		# loads randoms grid from file
		self.file_gridR = file_gridR
		self.func_type = 'diag'
		if nondiag:
			self.func_type = 'nondiag'
		# default calibration reference
		# this just serves as a default
		# and may be overridden in load_calib()
		self.calibfile = 'calib_ref_random0_DR12v5_CMASS_South_gs_5.npy'
		self.calib: float = None
		# # correlation order has to match radii given
		# assert order == len(r1_list) + 1
		# self.r1_list = r1_list
		self.params_file = params_file
		self.save = save
		self.save_randoms = save_randoms
		self.savename = savename
		self.printout = printout
		# the three CenterFinder instances are injected via set_cf*()
		self.cfR: CenterFinder = None
		self.cf0: CenterFinder = None
		self.cf1: CenterFinder = None
		self.dencon_args0: tuple = None
		self.dencon_args1: tuple = None
		self.density_grid: np.ndarray = None
		self.randoms_grid: np.ndarray = None
		self.W0: np.ndarray = None
		self.B0: np.ndarray = None
		self.W1_prod: np.ndarray = None
		self.B1_prod: np.ndarray = None
		self.corrfunc: dict = None
		self.ND: float = None
		self.NR: float = None
	# --- accessors for the three CenterFinder instances ---------------
	def set_cfR(self, cfR):
		# randoms-catalog finder (background)
		self.cfR = cfR
	def get_cfR(self):
		return self.cfR
	def set_cf0(self, cf0):
		# kernel-0 finder (fixed radius)
		self.cf0 = cf0
	def get_cf0(self):
		return self.cf0
	def set_cf1(self, cf1):
		# kernel-1 finder (scanned radius)
		self.cf1 = cf1
def get_cf1(self):
return cf1
	def make_cfR(self, dge):
		"""Builds (or loads) the normalized randoms grid shared by cf0/cf1.

		Args:
			dge: density grid edges from cf0, so the randoms histogram is
				binned identically to the data histogram.
		"""
		if self.file_gridR:
			# a pre-computed randoms grid was supplied on disk
			self.cfR.set_randoms_grid(np.load('data/'+self.file_gridR))
			self.randoms_grid = self.cfR.get_randoms_grid()
		else:
			# cfR only needs to build the randoms grid
			self.cfR.set_density_grid_edges(dge) # from cf0
			self.NR = self.cfR.make_histo_grid()
			self.cfR.make_randoms_grid()
			# normalizes the randoms grid
			# sets randoms grid for use with cf0 and cf1
			data2rand_ratio = self.ND / self.NR
			self.randoms_grid = self.cfR.get_randoms_grid()\
				* data2rand_ratio
			if self.save_randoms:
				np.save(self.savename + 'gridR_{}_gs_{}.npy'\
					.format(remove_ext(self.fileR), self.cf0.grid_spacing),
					self.randoms_grid)
	def _customize_cf0(self, args):
		"""Applies the '-*0' command-line options to the kernel-0 finder.

		Also records the background-subtraction flags as
		self.dencon_args0 = (do_contrast, keep_negative_weights).
		"""
		# defaults kernel_radius to 1/2 grid_spacing for 0th centerfinder
		self.cf0.set_kernel_radius(self.cf0.grid_spacing / 2)
		if args.kernel_radius0:
			self.cf0.set_kernel_radius(args.kernel_radius0)
		if args.show_kernel0:
			self.cf0.set_show_kernel(args.show_kernel0)
		if args.step_kernel0:
			self.cf0.set_kernel_type('step', args.step_kernel0)
		elif args.gaussian_kernel0:
			self.cf0.set_kernel_type('gaussian', args.gaussian_kernel0)
		elif args.wavelet_kernel0:
			self.cf0.set_kernel_type('wavelet', args.wavelet_kernel0)
		elif args.custom_kernel0:
			self.cf0.set_kernel_type('custom', args.custom_kernel0)
		if args.vote_threshold0:
			self.cf0.set_vote_threshold(args.vote_threshold0)
		if args.density_contrast0:
			do_dencon0 = True
			# NOTE(review): '-c0' with no values parses to an empty list,
			# which is falsy and lands in the else branch below, so this
			# len()==0 check appears unreachable — confirm intent
			if len(args.density_contrast0)==0:
				keep_neg_wts0 = True
			else:
				keep_neg_wts0 = False
		else:
			do_dencon0 = True
			keep_neg_wts0 = True
		self.dencon_args0 = (do_dencon0, keep_neg_wts0)
	def prep_cf0(self, args):
		"""Customizes cf0 from CLI args and histograms the data catalog."""
		# customizes the cf object and the bckgrnd subtraction
		self._customize_cf0(args)
		self.ND = self.cf0.make_histo_grid()
	def make_cf0(self, args):
		"""Builds the kernel-0 signal (W0) and background (B0) grids."""
		self.cf0.set_randoms_grid(self.randoms_grid)
		self.cf0.make_density_grid(dencon=self.dencon_args0,
			overden=args.overdensity0)
		# if auto-correlation, keeps density so it calculates only once
		if self.type == 'auto':
			self.density_grid = self.cf0.get_density_grid()
		# there's no need for convolving
		# if the whole kernel is just one cell
		if self.cf0.kernel_radius == self.cf0.grid_spacing/2:
			self.W0 = self.cf0.get_density_grid()
			self.B0 = self.cf0.get_randoms_grid()
		# runs the centerfinding algorithm
		else:
			self.cf0.make_convolved_grids()
			self.W0 = self.cf0.get_centers_grid()
			self.B0 = self.cf0.get_background_grid()
	def _customize_cf1(self, args):
		"""Applies the '-*1' command-line options to the kernel-1 finder.

		Mirrors _customize_cf0; records self.dencon_args1.
		"""
		if args.kernel_radius1:
			self.cf1.set_kernel_radius(args.kernel_radius1)
		if args.show_kernel1:
			self.cf1.set_show_kernel(args.show_kernel1)
		if args.step_kernel1:
			self.cf1.set_kernel_type('step', args.step_kernel1)
		elif args.gaussian_kernel1:
			self.cf1.set_kernel_type('gaussian', args.gaussian_kernel1)
		elif args.wavelet_kernel1:
			self.cf1.set_kernel_type('wavelet', args.wavelet_kernel1)
		elif args.custom_kernel1:
			self.cf1.set_kernel_type('custom', args.custom_kernel1)
		if args.vote_threshold1:
			self.cf1.set_vote_threshold(args.vote_threshold1)
		if args.density_contrast1:
			do_dencon1 = True
			# NOTE(review): same apparently-unreachable branch as in
			# _customize_cf0 — confirm intent
			if len(args.density_contrast1)==0:
				keep_neg_wts1 = True
			else:
				keep_neg_wts1 = False
		else:
			do_dencon1 = True
			keep_neg_wts1 = True
		self.dencon_args1 = (do_dencon1, keep_neg_wts1)
	def make_cf1(self, args):
		"""Prepares the kernel-1 finder's density/randoms grids."""
		# customizes the cf object and the bckgrnd subtraction
		self._customize_cf1(args)
		self.cf1.set_randoms_grid(self.randoms_grid) # from cfR
		# doesn't recalculate density grid if auto-cor
		if self.type == 'auto':
			self.cf1.set_density_grid(self.density_grid)
		# runs the centerfinding algorithm again if x-cor
		# note cf0 boundaries override cf1 if different in x-corr
		else:
			self.cf1.set_density_grid_edges(self.cf0.get_density_grid_edges())
			self.cf1.make_grids(dencon=self.dencon_args1,
				overden=args.overdensity1)
@staticmethod
def _reduce_mult_till_order(cf1: CenterFinder, order: int) \
-> (np.ndarray, np.ndarray):
"""DEPRECATED METHOD
Reduction that maps high order corrfunc over same object.
Note this acts like a reduction but the args list is
created dynamically, i.e. W1, W1-1, W1-2 ... W1-(n-2)
Strictly for use on Correlator instance's cf1 object.
"""
W1_prod = cf1.get_centers_grid()
B1_prod = cf1.get_background_grid()
for i in range(1, order-1):
W1_prod *= (W1_prod - i)
B1_prod *= (B1_prod - i)
return W1_prod, B1_prod
def _make_W1_B1(self):
self.cf1.make_convolved_grids()
W1 = self.cf1.get_centers_grid()
B1 = self.cf1.get_background_grid()
return W1, B1
	def _correlate_diag(self):
		"""One diagonal correlation step at cf1's current radius.

		Forms W = W0 * W1^(order-1) and B = B0 * B1^(order-1), optionally
		saves the full grids, and returns their sums.  Note: calls
		cf1.cleanup(), which deletes cf1's convolved grids and its
		kernel_radius, so the radius must be reset before the next step.
		"""
		W1, B1 = self._make_W1_B1()
		W = self.W0 * W1 ** (self.order - 1)
		B = self.B0 * B1 ** (self.order - 1)
		if self.save:
			r1, r0 = self.cf1.kernel_radius, self.cf0.kernel_radius
			np.save(self.savename + '{}pcf_W_r1_{}_r0_{}.npy'\
				.format(self.order, r1, r0), W)
			np.save(self.savename + '{}pcf_B_r1_{}_r0_{}.npy'\
				.format(self.order, r1, r0), B)
		self.cf1.cleanup()
		W = np.sum(W)
		B = np.sum(B)
		return W, B
@staticmethod
def _multistep_product_from_dict(dict, steps_list):
product = 1
for s in steps_list:
product *= dict[s]
return product
@staticmethod
def _steps_list_to_idx_tuple(steps, steps_list):
return tuple((np.argwhere(steps==s)[0,0] for s in steps_list))
	def _recurse_dimensions_correlate(self,
		steps,
		running_steps_list,
		maxdims,
		currdim,
		loopidx_start):
		"""Recursively fills corrfunc_hyperarr over separation tuples.

		Walks one separation dimension per recursion level; at full depth
		it combines W0/B0 with the product of the cached per-separation
		W1/B1 grids and stores W/B at the tuple's index.

		NOTE(review): 'loopidx_start' is the enumerate index of a sliced
		view, so inner loops restart relative to the slice rather than at
		an absolute position — confirm this enumerates exactly the
		intended (non-decreasing) separation tuples.
		"""
		# ready to fill cell in W_hyperarr and B_hyperarr
		if currdim == maxdims:
			steps_idx_list = Correlator._steps_list_to_idx_tuple(steps, running_steps_list)
			print('Current separations and their idxs:\t{}\t{}'\
				.format(running_steps_list, steps_idx_list))
			W = np.sum(self.W0 *
				Correlator._multistep_product_from_dict(
					self.W1_dict, running_steps_list))
			B = np.sum(self.B0 *
				Correlator._multistep_product_from_dict(
					self.B1_dict, running_steps_list))
			# TODO: save W and B as an array and do the ratio only in the end
			self.corrfunc_hyperarr[steps_idx_list] = W / B
		# recurses to further dimensions
		else:
			for i, s in enumerate(steps[loopidx_start:]):
				new_running_steps_list = running_steps_list + [s]
				self._recurse_dimensions_correlate(
					steps,
					new_running_steps_list,
					maxdims,
					currdim+1,
					i)
	def save_single(self):
		"""Writes the single-radius separation/correlation pair to .npy."""
		separation = np.array(list(self.corrfunc.keys()))
		correlation = np.array(list(self.corrfunc.values()))
		if self.printout:
			print('Separation value:\n', separation)
			print('Correlation value:\n', correlation)
		np.save(self.savename + '{}pcf_sep_{}.npy'\
			.format(self.order, separation[0]), separation)
		np.save(self.savename + '{}pcf_corr_{}.npy'\
			.format(self.order, separation[0]), correlation)
def single_correlate(self):
s = self.cf1.kernel_radius
self.corrfunc = {s: None}
W, B = self._correlate_diag()
self.corrfunc[s - self.calib] = W / B
self.save_single()
def _save_scan(self, scan_args):
losep, hisep = scan_args
separation = np.array(list(self.corrfunc.keys()))
correlation = np.array(list(self.corrfunc.values()))
if self.printout:
print('Separation array:\n', separation)
print('Correlation array:\n', correlation)
np.save(self.savename + '{}pcf_sep_range_{}_{}.npy'\
.format(self.order, losep, hisep), separation)
np.save(self.savename + '{}pcf_corr_range_{}_{}.npy'\
.format(self.order, losep, hisep), correlation)
    def _save_plot(self, scan_args):
        """Plot xi(s) versus separation for a completed scan and save a PNG.

        scan_args is the (losep, hisep) pair; it only labels the output file.
        """
        losep, hisep = scan_args
        separation = list(self.corrfunc.keys())
        correlation = list(self.corrfunc.values())
        # label records the kernel radius and grid spacing of the count grid
        plt.plot(separation, correlation,
                 label='conker r0={} gs={}'\
                 .format(self.cf0.kernel_radius, self.cf0.grid_spacing))
        plt.title('ConKer: {} {} {}pcf'\
            .format(
                remove_ext(self.file1),
                'weighted' if self.cf1.weighted else 'unweighted',
                self.order
                )
            )
        plt.xlabel(r'$s$ $[h^{-1}Mpc]$')
        plt.ylabel(r'$\xi(s)$')
        plt.grid(linestyle=':')
        plt.legend()
        # writes into a 'plots/' directory relative to the working directory
        plt.savefig('plots/' + '{}pcf_scan_{}_{}_{}.png'\
            .format(self.order, losep, hisep, remove_ext(self.file1)),
            dpi=300)
    def scan_correlate_diag(self, scan_args):
        """Scan separations along the main diagonal and save W, B per step.

        For each separation s in [start, end] (stepped by the grid spacing)
        the kernel radius is updated, W/B is computed and stored keyed by
        the calibrated separation, and the raw W and B grids are saved.
        """
        start, end = scan_args
        step = self.cf1.grid_spacing
        steps = np.arange(start, end+step, step)
        self.corrfunc = {}
        for s in steps:
            self.cf1.set_kernel_radius(s)
            W, B = self._correlate_diag()
            self.corrfunc[s - self.calib] = W / B
            np.save(self.savename + '{}pcf_W_r1_{}.npy'\
                .format(self.order, s), W)
            np.save(self.savename + '{}pcf_B_r1_{}.npy'\
                .format(self.order, s), B)
        self._save_scan(scan_args)
        # only the diagonal (1D) correlation function gets a summary plot
        if self.func_type == 'diag':
            self._save_plot(scan_args)
def scan_correlate_nondiag(self, scan_args):
losep, hisep = scan_args
step = self.cf1.grid_spacing
steps = np.arange(losep, hisep+step, step)
steps_calib = steps - self.calib # TODO: maybe the index error comes from here?
self.W1_dict, self.B1_dict = {}, {} # TODO: instantiate these in constructor
if self.printout:
print('\n\n\nStarting correlation along main diagonal...\n')
start = time.time()
# TODO: chunk this up to store just neighborhoods as it fills upwards in s
# fills up W1_dict and B1_dict for each step
for s in steps_calib:
self.cf1.set_kernel_radius(s)
self.W1_dict[s], self.B1_dict[s] = self._make_W1_B1()
end = time.time()
print(f'\nCorrelation on main diagonal took time: {end-start} seconds\n')
if self.printout:
print('\n\n\nCalculating correlation everywhere...\n')
start = time.time()
# fills up hyperarray sparsely
hyperarr_idx_levels = self.order - 1
hyperarr_shape = steps.shape * hyperarr_idx_levels
self.corrfunc_hyperarr = np.empty(hyperarr_shape)
self._recurse_dimensions_correlate(steps_calib, [], hyperarr_idx_levels, 0, 0)
end = time.time()
print(f'\nCorrelation everywhere took time: {end-start} seconds\n')
if self.printout:
print('Shape of complete correlation function:', self.corrfunc_hyperarr.shape)
np.round(self.corrfunc_hyperarr, decimals=self.cf1.rounding_precision)
# TODO: flip the hyperarrays across all symmetry lines
# to fill up the whole corrfunc for arbitrary order
if self.order==3:
full = self.corrfunc_hyperarr + self.corrfunc_hyperarr.T
for i in range(len(steps_calib)):
full[i,i] = self.corrfunc_hyperarr[i,i]
self.corrfunc_hyperarr = full
np.save(self.savename + '{}pcf_nondiag_1dbins_range_{}_{}.npy'\
.format(self.order, losep, hisep), steps_calib)
np.save(self.savename + '{}pcf_nondiag_corrfunc_range_{}_{}.npy'\
.format(self.order, losep, hisep), self.corrfunc_hyperarr)
def load_calib(self):
if self.printout:
print('\n\n\nLoading calibration data...\n')
try:
if self.fileR:
calibfile = 'calibration/' + 'calib_ref_{}_gs_{}.npy'\
.format(remove_ext(self.fileR), self.cf1.grid_spacing)
elif self.file_gridR:
# reconstructs expected calib filename from gridR filename
# changes expected file_gridR model: 'gridR_random0_DR12v5_CMASS_South_gs_5.npy'
# to make: 'calib_ref_random0_DR12v5_CMASS_South_gs_5.npy'
fname = '_'.join(self.file_gridR.split('_')[1:])
calibfile = 'calibration/calib_ref_' + fname
self.calib = np.load(calibfile)
self.calibfile = calibfile # success
if self.printout:
print('Loaded CUSTOM calibration data from file:\n', self.calibfile)
except:
self.calib = np.load(self.calibfile)
if self.printout:
print('Loaded DEFAULT calibration data from file:\n', self.calibfile)
| 14,519 | 28.156627 | 84 | py |
conker | conker-main/src/plotter.py | import numpy as np
import matplotlib.pyplot as plt
plt.rc('figure', figsize=[9,9])
plt.rc('font', family='serif')
plt.rc('axes', titlesize=18)
plt.rc('axes', labelsize=12)
plt.rc('xtick', top=True)
plt.rc('xtick.minor', visible=True)
plt.rc('ytick', right=True)
plt.rc('ytick.minor', visible=True)
def _plot_slice(cf, bounds):
    """Plot a redshift slice: a 2D histogram of centers with galaxies overlaid.

    bounds may be empty (use full data extent), (zlow, zhi), or the full
    6-tuple (zlow, zhi, ralow, rahi, declow, dechi). Highly-voted center
    cells are circled. Raises ValueError on malformed bounds or empty data.
    """
    # user specified bounds on all coords
    if len(bounds)==6:
        zlow, zhi, ralow, rahi, declow, dechi = bounds
    # specified bounds only on redshift, max bounds on ra and dec
    elif len(bounds)==2:
        zlow, zhi = bounds
        ralow, rahi = cf.G_ra.min(), cf.G_ra.max()
        declow, dechi = cf.G_dec.min(), cf.G_dec.max()
    # specified no bounds, max bounds assumed on all coords
    elif len(bounds)==0:
        zlow, zhi = cf.G_redshift.min(), cf.G_redshift.max()
        ralow, rahi = cf.G_ra.min(), cf.G_ra.max()
        declow, dechi = cf.G_dec.min(), cf.G_dec.max()
    else:
        raise ValueError('Plotting Error: Number of plotting function arguments '\
            f'= {len(bounds)}, but expected either 0, 2 or 6')
    if zlow>zhi or ralow>rahi or declow>dechi:
        raise ValueError('Plotting Error: Lower bound of a coordinate '\
            'cannot be greater than higher bound of said coordinate.')
    # cannot plot 0 centers
    if len(cf.C_weights)==0:
        raise ValueError('Plotting Error: Centers list is empty.')
    fig, ax = plt.subplots()
    # gets centers to plot
    slice_indices = np.asarray((cf.C_redshift>=zlow) & (cf.C_redshift<=zhi)).nonzero()
    ra_slice, dec_slice = cf.C_ra[slice_indices], cf.C_dec[slice_indices]
    wts_slice = cf.C_weights[slice_indices]
    h, ra, dec, _ = ax.hist2d(ra_slice, dec_slice, weights=wts_slice, bins=100,
                              range=[[ralow,rahi],[declow,dechi]], alpha=0.7)
    # gets galaxies to plot
    slice_indices = np.asarray((cf.G_redshift>=zlow) & (cf.G_redshift<=zhi)
                             & (cf.G_ra>=ralow) & (cf.G_ra<=rahi)
                             & (cf.G_dec>=declow) & (cf.G_dec<=dechi)).nonzero()
    ra_slice, dec_slice = cf.G_ra[slice_indices], cf.G_dec[slice_indices]
    ax.plot(ra_slice, dec_slice, 'r*', ms=4)
    # plots circles around highly voted centers
    cell_density_floor = .8*h.max()
    circ_indices = np.asarray(h>=cell_density_floor).nonzero()
    # bounds the number of circles on screen by raising the vote floor
    while len(circ_indices[0])>20:
        cell_density_floor*=1.01
        circ_indices = np.asarray(h>=cell_density_floor).nonzero()
    ra, dec = ra[circ_indices[0]], dec[circ_indices[1]]
    circ_centers = list(zip(ra, dec))
    for xy in circ_centers:
        # TODO: automatize calculation for angle subtended by R0 given z
        # NOTE(review): circle radius 4.5 degrees is hard-coded — presumably
        # the angular size of R0 at a typical z; confirm.
        ax.add_artist(plt.Circle(xy, 4.5, ls='--', fill=False, alpha=.5))
    plt.title('Plot of centers and galaxies between $z={:.2f}$ and '\
        '$z={:.2f}$'.format(zlow, zhi))
    plt.xlabel('$RA$ $[\\degree]$')
    plt.ylabel('$DEC$ $[\\degree]$')
    if cf.save:
        savename = cf.savename + 'slice_plot_z_{:.2f}_{:.2f}.png'.format(zlow,zhi)
        plt.savefig(savename, dpi=300)
    plt.show()
def _plot_coord_hist(cf, which: str):
    """Plot overlaid histograms of one coordinate for galaxies and centers.

    which must be one of 'RA', 'DEC', 'Z', 'R'; anything else raises
    ValueError. Saves a PNG when cf.save is set, then shows the figure.
    """
    if which=='RA':
        gcoord, ccoord = cf.G_ra, cf.C_ra
        plt.xlabel(which + r' $[\degree]$')
    elif which=='DEC':
        gcoord, ccoord = cf.G_dec, cf.C_dec
        plt.xlabel(which + r' $[\degree]$')
    elif which=='Z':
        gcoord, ccoord = cf.G_redshift, cf.C_redshift
        plt.xlabel(which + r' $[h^{-1}Mpc]$')
    elif which=='R':
        gcoord, ccoord = cf.G_radii, cf.C_radii
        plt.xlabel(which + r' $[h^{-1}Mpc]$')
    else:
        raise ValueError('Plotting Error: Can only plot histogram for one of '\
            'RA, DEC, Z, R; instead found "{}"'.format(which))
    # centers drawn as a step outline so both histograms stay visible
    plt.hist(gcoord, label='Galaxies ' + which)
    plt.hist(ccoord, label='Centers ' + which, histtype='step')
    plt.title('Galaxy and center unweighted counts by ' + which)
    plt.ylabel('Count')
    plt.legend()
    if cf.save:
        savename = cf.savename + '{}_hist_r_{}_cut_{}.png'.format(which,
            cf.kernel_radius,cf.vote_threshold)
        plt.savefig(savename, dpi=300)
    plt.show()
| 3,772 | 31.525862 | 83 | py |
conker | conker-main/src/kernel.py | """
Copyright (C) 2021 Gebri Mishtaku
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses.
"""
from math import pi, exp
import numpy as np
import sys
class Kernel:
    """Discretized 3D spherical kernel built by rotating a 1D radial profile.

    Supported types: 'step', 'gaussian', 'wavelet', 'custom' (profile loaded
    from a .npy file) and 'ball' (filled sphere). The resulting cube is
    available as self.grid; the kernel radius in grid-index units as
    self.kernel_r_idx_units.
    """

    def __init__(self, type: str, radius: float, grid_spacing: float,
                 printout: bool, plot: bool, *args):
        self.type = type
        self.radius = radius
        self.args = args
        self.grid_spacing = grid_spacing
        self.plot = plot
        self.printout = printout
        if self.type=='step':
            # interprets args[0] as step function thickness in h^-1Mpc
            self.thickness = self.args[0] if len(self.args)>0 else 1
        elif self.type=='gaussian':
            # interprets args[0] as stdev of gaussian with mean at kernel rad
            self.stdev = self.args[0]
        elif self.type=='wavelet':
            # interprets args[0] as width of wavelet with scale at kernel rad
            self.width = self.args[0]
        elif self.type=='custom':
            # interprets args[0] as the file containing the custom array
            self.source = self.args[0]
        elif self.type=='ball':
            pass
        if self.printout:
            print(f'Constructing {self.type} kernel...')
        self.grid, self.kernel_r_idx_units = self._make_grid()
        if self.printout:
            n_nonzero = len(self.grid[self.grid!=0])
            n_empty = len(self.grid[self.grid==0])
            print('Kernel constructed successfully...')
            print('Number of nonzero kernel bins:', n_nonzero)
            print('Number of empty kernel bins:', n_empty)
            # BUGFIX: guard against ZeroDivisionError for small/dense kernels
            # where every bin is nonzero
            if n_empty > 0:
                print('Nonzero to empty ratio:', n_nonzero / n_empty)
        # this is here for sanity checks
        # shows the kernel in 3D with blue disks in nonzero kernel bins
        if self.plot:
            import matplotlib.pyplot as plt
            from mpl_toolkits.mplot3d import Axes3D
            fig, ax = plt.subplots(1, 1, subplot_kw={'projection': '3d'})
            ax.scatter(*np.where(self.grid!=0), c='cornflowerblue')
            plt.show()

    def get_grid(self):
        """Return the 3D kernel array."""
        return self.grid

    def get_kernel_center(self):
        """Return the (i, j, k) index of the kernel's central bin."""
        return self.kernel_center

    def _calculate_settings(self):
        """Derive grid sizes (in index units) from radius and grid spacing."""
        # calculates the kernel inscribed radius in index units.
        # for a kernel with radius 100 and grid_spacing 10, the radius
        # is 10 in idx unitx. this puts the value of the function at
        # r = 100 in bin idx 10 (the 11th bin) of the 1D function.
        kernel_r_idx_units = int(self.radius // self.grid_spacing) + 1
        # calculates circumscribed sphere radius for easy eval of 1D func
        circumscribed_r_idx_units = int(np.ceil(3**.5 * kernel_r_idx_units))
        # calculates the number of bins in each dimensional axis
        # this calculation ensures an odd numbered gridding so that
        # the kernel construction has a distinct central bin on any given run
        kernel_bin_count = 2 * kernel_r_idx_units + 1
        # central bin index, since the kernel is a cube this can just be one int
        kernel_center_idx = kernel_bin_count // 2
        self.kernel_center = np.array([kernel_center_idx, ] * 3)
        return (
            kernel_r_idx_units, circumscribed_r_idx_units,
            kernel_bin_count, kernel_center_idx, self.kernel_center)

    def _calculate_settings_custom(self, func_arr: np.array):
        """Like _calculate_settings, but the radius is the profile's length."""
        # look at _calculate_settings() for descriptions of below calculations
        kernel_r_idx_units = len(func_arr)
        circumscribed_r_idx_units = int(np.ceil(3**.5 * kernel_r_idx_units))
        kernel_bin_count = 2 * kernel_r_idx_units + 1
        kernel_center_idx = kernel_bin_count // 2
        self.kernel_center = np.array([kernel_center_idx, ] * 3)
        return (
            kernel_r_idx_units, circumscribed_r_idx_units,
            kernel_bin_count, kernel_center_idx, self.kernel_center)

    def _make_step(self, kernel_r_idx_units: int, circumscribed_r_idx_units: int):
        """Build a normalized 1D top-hat shell profile at the kernel radius."""
        # transforms kernel thickness to index units
        thickness_idx_units = int(np.ceil(self.thickness / self.grid_spacing))
        # note these have to be lowered by 1 for correct calculation of bounds
        # kernel_r_idx_units = 2 needs upper idx bound 1.5 (idx 1 is 2nd idx)
        kernel_r_idx_units_upper_bound = kernel_r_idx_units + 0.5 * thickness_idx_units - 1
        kernel_r_idx_units_lower_bound = kernel_r_idx_units - 0.5 * thickness_idx_units - 1
        step_func = np.array([
            1 if i>=kernel_r_idx_units_lower_bound
            and i<kernel_r_idx_units_upper_bound
            else 0
            for i in range(circumscribed_r_idx_units)])
        # normalization = 1 / integral
        step_func = step_func / np.sum(step_func) # normalizes
        return step_func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound

    def _make_gaussian(self, kernel_r_idx_units: int, circumscribed_r_idx_units: int):
        """Build a normalized 1D Gaussian profile centered at the kernel radius."""
        # transforms stdev to index units
        stdev_idx_units = int(np.ceil(self.stdev / self.grid_spacing))
        # 99.7% of all data in a normal dist is within 3 std devs of mean
        kernel_r_idx_units_upper_bound = kernel_r_idx_units + 3*stdev_idx_units - 1
        kernel_r_idx_units_lower_bound = kernel_r_idx_units - 3*stdev_idx_units - 1
        # calculates 1/(stdev root(2pi)) e^(- (x-mean)^2 / (2 stdev^2))
        over_sroot2pi = 1. / (stdev_idx_units * (2.*pi)**.5)
        minus_over_2ssquared = -1. / (2. * stdev_idx_units**2)
        gauss_func = np.array([
            (lambda mean,stdev,x:
                over_sroot2pi * exp( minus_over_2ssquared * (x-mean)**2 ))
            (kernel_r_idx_units, stdev_idx_units,i)
            if i>=kernel_r_idx_units_lower_bound
            and i<kernel_r_idx_units_upper_bound
            else 0
            for i in range(circumscribed_r_idx_units)])
        gauss_func = gauss_func / np.sum(gauss_func) # normalizes
        return gauss_func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound

    def _make_wavelet(self, kernel_r_idx_units: int, circumscribed_r_idx_units: int):
        """Build a normalized 1D B-spline wavelet profile at the kernel radius."""
        # transforms wavelet width parameter to index units
        width_idx_units = int(np.ceil(self.width / self.grid_spacing))
        kernel_r_idx_units_upper_bound = kernel_r_idx_units + 2*width_idx_units - 1
        kernel_r_idx_units_lower_bound = kernel_r_idx_units - 2*width_idx_units - 1
        # calculates psi(x) = 1/(4 pi x^2) ( 2 B_3(2(x-R)/s) - B_3((x-R)/s) )
        # where B_3(x) = 1/12 (|x-2|^3 - 4|x-1|^3 + 6|x|^3 - 4|x+1|^3 + |x+2|^3)
        # R is scale = kernel radius and s is width, both are in index units
        @np.vectorize
        def _B_3(x):
            return (abs(x-2)**3 - 4*abs(x-1)**3 + 6*abs(x)**3 - 4*abs(x+1)**3 + abs(x+2)**3) / 12
        @np.vectorize
        def _psi(x):
            y = (x - kernel_r_idx_units) / width_idx_units
            return (2*_B_3(2*y) - _B_3(y)) / (4*pi*x**2)
        wave_func = np.array([
            _psi(i)
            if i>=kernel_r_idx_units_lower_bound
            and i<kernel_r_idx_units_upper_bound
            else 0
            for i in range(circumscribed_r_idx_units)])
        wave_func = wave_func / np.sum(abs(wave_func)) # normalizes
        return wave_func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound

    def _make_custom(self):
        """Load a user-supplied 1D radial profile from self.source (.npy)."""
        # reads custom array from file passed as argument to Kernel
        custom_func = np.load(self.source)
        # kernel radius here is the whole domain of the user defined function
        # multiplied by the grid_spacing, so a function defined over 22 indices with
        # a grid_spacing 5Mpc/h infers that the kernel_radius is 110Mpc/h
        kernel_r_idx_units_lower_bound = 0
        kernel_r_idx_units_upper_bound = len(custom_func)
        return custom_func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound

    def _make_grid(self) -> np.ndarray:
        """Build the 3D kernel: evaluate the 1D profile, then sphericize it.

        Returns (grid, kernel_r_idx_units).
        """
        if self.type=='step' or self.type=='gaussian' or self.type=='wavelet':
            kernel_r_idx_units, circumscribed_r_idx_units,\
            kernel_bin_count, kernel_center_idx, kernel_center = self._calculate_settings()
            # evaluates 1D function that's gonna be rotated below
            # defined over the entire radius of the circumscribed sphere of the kernel cube
            if self.type=='step':
                func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound = \
                    self._make_step(kernel_r_idx_units, circumscribed_r_idx_units)
            elif self.type=='gaussian':
                func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound = \
                    self._make_gaussian(kernel_r_idx_units, circumscribed_r_idx_units)
            elif self.type=='wavelet':
                func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound = \
                    self._make_wavelet(kernel_r_idx_units, circumscribed_r_idx_units)
        elif self.type=='custom':
            # the upper bound radius is just the radius here
            func, kernel_r_idx_units_upper_bound, kernel_r_idx_units_lower_bound = self._make_custom()
            kernel_r_idx_units, circumscribed_r_idx_units,\
            kernel_bin_count, kernel_center_idx, kernel_center = self._calculate_settings_custom(func)
            # pads function with 0s from the user-fed radius to the circumscribed radius
            func = np.array([func[i] if i<kernel_r_idx_units else 0
                             for i in range(circumscribed_r_idx_units)])
        elif self.type=='ball':
            # forms a filled-sphere kernel
            kernel_r_idx_units = int(self.radius // self.grid_spacing) + 1
            func = np.ones(kernel_r_idx_units)
            func /= len(func) # norm
            kernel_r_idx_units_lower_bound = 0
            kernel_r_idx_units_upper_bound = len(func)
            _, circumscribed_r_idx_units,\
            kernel_bin_count, kernel_center_idx, kernel_center = self._calculate_settings_custom(func)
            # pads function with 0s from the user-fed radius to the circumscribed radius
            func = np.array([func[i] if i<kernel_r_idx_units else 0
                             for i in range(circumscribed_r_idx_units)])
        if self.printout:
            print('Kernel radius in index units:', kernel_r_idx_units)
            print('Kernel radius upper bound:', kernel_r_idx_units_upper_bound)
            print('Kernel radius lower bound:', kernel_r_idx_units_lower_bound)
            print('Kernel bin count (side length of grid in idx units):', kernel_bin_count)
            print('Integral of 1D kernel function:', np.sum(abs(func)))
        if self.plot:
            self._plot_1d_func(func, circumscribed_r_idx_units)
        return (
            Kernel._sphericize(func,
                               kernel_center, kernel_bin_count,
                               kernel_r_idx_units_upper_bound),
            kernel_r_idx_units
            )

    @staticmethod
    def _sphericize(func: np.array, center: np.array, bin_count: int, radius: int):
        """Rotate the 1D radial profile into a cube: each cell takes the
        profile value at its (ceil-rounded) distance from the center."""
        kern = np.zeros((bin_count,)*3)
        for i in range(bin_count):
            for j in range(bin_count):
                for k in range(bin_count):
                    idx_dist = int(np.ceil(np.linalg.norm(np.array([i,j,k])-center)))
                    if idx_dist < radius:
                        kern[i,j,k] = func[idx_dist]
        return kern

    def _plot_1d_func(self, func: np.array, outer_bound: float):
        """Show the 1D radial profile as a bar plot (sanity check)."""
        import matplotlib.pyplot as plt
        bins = self.grid_spacing * np.arange(0.,outer_bound)
        plt.bar(bins, func, width=self.grid_spacing, edgecolor='black')
        plt.title(f'Bar plot of discrete kernel function in 1D: {self.type.upper()}')
        plt.xlabel('Radial distance from kernel center $[h^{-1}Mpc]$')
        plt.show()
| 11,022 | 37.010345 | 93 | py |
DeepIR | DeepIR-main/demo.py | #!/usr/bin/env python
import os
import sys
from pprint import pprint
# Pytorch requires blocking launch for proper working
if sys.platform == 'win32':
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
from scipy import io
import torch
import torch.nn
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
sys.path.append('modules')
import utils
import motion
import dataset
import thermal
if __name__ == '__main__':
    # ---- experiment knobs ----
    imname = 'test1'            # Name of the test file name
    camera = 'sim'              # 'sim', 'boson' or 'lepton'
    scale_sr = 8                # 1 for denoising/NUC, 2, 3, .. for SR
    nimg = 20                   # Number of input images
    method = 'dip'              # 'cvx' for Hardie et al., 'dip' for DeepIR

    # Load config file --
    config = dataset.load_config('configs/%s_%s.ini'%(method, camera))
    config['batch_size'] = nimg
    # Windows DataLoader workers are unreliable; fall back to 0 there
    config['num_workers'] = (0 if sys.platform=='win32' else 4)
    # prior weight is scaled by SR factor per image
    config['lambda_prior'] *= (scale_sr/nimg)

    # Load data
    if not config['real']:
        # This is simulated data
        im = utils.get_img(imname, 1)
        minval = 0
        maxval = 1
    else:
        # This is real data; keep raw min/max to undo normalization later
        im, minval, maxval = utils.get_real_im(imname, camera)

    # Get data for SR -- this will also get an initial estimate for registration
    im, imstack, ecc_mats = motion.get_SR_data(im, scale_sr, nimg, config)
    # translation column of the affine mats is in LR pixels; scale to HR
    ecc_mats[:, :, 2] *= scale_sr

    H, W = im.shape

    # Load LPIPs function
    config['gt'] = im

    # Now run denoising
    if method == 'cvx':
        im_dip, profile_dip = thermal.interp_convex(imstack.astype(np.float32),
                                                    ecc_mats.astype(np.float32),
                                                    (H, W), config)
    else:
        im_dip, profile_dip = thermal.interp_DIP(imstack.astype(np.float32),
                                                 ecc_mats.astype(np.float32),
                                                 (H, W), config)
    # Save data
    mdict = {'gt': im,
             'rec': im_dip,
             'gain': profile_dip['gain'],
             'offset': profile_dip['offset'],
             'snr': profile_dip['metrics']['snrval'],
             'psnr': profile_dip['metrics']['psnrval'],
             'ssim': profile_dip['metrics']['ssimval'],
             'minval': minval,
             'maxval': maxval}
    io.savemat('%s_%s_%s_%dx_%d.mat'%(imname, camera, method,
                                      scale_sr, nimg), mdict)
    pprint(profile_dip['metrics'])
| 2,636 | 31.555556 | 80 | py |
DeepIR | DeepIR-main/modules/losses.py | #!/usr/bin/env python
import torch
class TVNorm():
    """Total-variation penalty over the last two (spatial) dimensions.

    mode selects the flavor: 'isotropic' (sqrt of squared gradients),
    'l1' (sum of absolute gradients), anything else -> squared (l2).
    """
    def __init__(self, mode='l1'):
        self.mode = mode

    def __call__(self, img):
        dx = img[..., 1:, 1:] - img[..., 1:, :-1]
        dy = img[..., 1:, 1:] - img[..., :-1, 1:]
        if self.mode == 'isotropic':
            return torch.sqrt(dx**2 + dy**2).mean()
        if self.mode == 'l1':
            return dx.abs().mean() + dy.abs().mean()
        return (dx.pow(2) + dy.pow(2)).mean()
class HessianNorm():
    """Mean Frobenius norm of the discrete 2D Hessian (second derivatives)."""
    def __init__(self):
        pass

    def __call__(self, img):
        # Second differences along each axis, centered on the interior
        center = img[..., 1:-1, 1:-1]
        fxx = img[..., 1:-1, :-2] + img[..., 1:-1, 2:] - 2 * center
        fyy = img[..., :-2, 1:-1] + img[..., 2:, 1:-1] - 2 * center
        fxy = (img[..., :-1, :-1] + img[..., 1:, 1:]
               - img[..., 1:, :-1] - img[..., :-1, 1:])
        cross = fxy[..., :-1, :-1]
        return torch.sqrt(fxx.abs().pow(2)
                          + 2 * cross.abs().pow(2)
                          + fyy.abs().pow(2)).mean()
class L1Norm():
    """Mean absolute error between two tensors."""
    def __init__(self):
        pass

    def __call__(self, x1, x2):
        return (x1 - x2).abs().mean()
class PoissonNorm():
    """Poisson negative log-likelihood (up to constants): mean(x1 - x2*log(x1))."""
    def __init__(self):
        pass

    def __call__(self, x1, x2):
        # epsilon keeps the log finite when x1 has zeros
        return (x1 - x2 * torch.log(x1 + 1e-12)).mean()
class L2Norm():
    """Mean squared error between two tensors."""
    def __init__(self):
        pass

    def __call__(self, x1, x2):
        diff = x1 - x2
        return diff.pow(2).mean()
| 1,586 | 30.117647 | 80 | py |
DeepIR | DeepIR-main/modules/utils.py | #!/usr/bin/env python
'''
Miscellaneous utilities that are extremely helpful but cannot be clubbed
into other modules.
'''
import torch
# Scientific computing
import numpy as np
import scipy.linalg as lin
from scipy import io
# Plotting
import cv2
import matplotlib.pyplot as plt
def nextpow2(x):
    '''
    Return the smallest power of 2 greater than or equal to x.

    Note: returns a float (e.g. nextpow2(5) -> 8.0), and returns x itself
    when x is already a power of 2.
    '''
    logx = np.ceil(np.log2(x))
    return pow(2, logx)
def normalize(x, fullnormalize=False):
    '''
    Rescale input to lie in [0, 1].

    Inputs:
        x: Input signal (ndarray)
        fullnormalize: If True, map min(x) -> 0 and max(x) -> 1.
            Otherwise only divide by max(x) (minimum assumed 0).

    Outputs:
        Normalized copy of x. A zero-sum input is returned unchanged.
    '''
    # zero-sum input: nothing to scale, avoid division by zero
    if x.sum() == 0:
        return x
    hi = x.max()
    lo = x.min() if fullnormalize else 0
    return (x - lo) / (hi - lo)
def asnr(x, xhat, compute_psnr=False):
    '''
    Affine-invariant SNR: fit the best affine map a*xhat + b to x (least
    squares), then report SNR (or PSNR) of x against the fitted signal.

    Inputs:
        x: Ground truth signal (ndarray)
        xhat: Approximation of x
        compute_psnr: if True, return PSNR instead of reconstruction SNR

    Outputs:
        SNR (dB) of x vs. the affine-corrected xhat
    '''
    mean_xy = (x * xhat).mean()
    mean_x2 = (xhat * xhat).mean()
    mean_xhat = xhat.mean()
    mean_x = x.mean()
    # closed-form least-squares affine fit
    scale = (mean_xy - mean_xhat * mean_x) / (mean_x2 - mean_xhat * mean_xhat)
    shift = mean_x - scale * mean_xhat
    fitted = scale * xhat + shift
    if compute_psnr:
        return psnr(x, fitted)
    return rsnr(x, fitted)
def rsnr(x, xhat):
    '''
    Reconstruction SNR in dB: 20*log10(||x|| / ||x - xhat||).

    Inputs:
        x: Ground truth signal (ndarray)
        xhat: Approximation of x

    Outputs:
        RSNR value in dB
    '''
    signal_norm = lin.norm(x.reshape(-1))
    # tiny epsilon keeps the ratio finite for perfect reconstructions
    error_norm = lin.norm((x - xhat).reshape(-1)) + 1e-12
    return 20 * np.log10(signal_norm / error_norm)
def psnr(x, xhat):
    ''' Compute Peak Signal to Noise Ratio in dB

    PSNR = 10*log10(max(x)^2 / MSE(x, xhat))

    Inputs:
        x: Ground truth signal
        xhat: Reconstructed signal

    Outputs:
        snrval: PSNR in dB
    '''
    err = x - xhat
    # epsilon keeps the ratio finite for perfect reconstructions
    denom = np.mean(pow(err, 2)) + 1e-12
    # BUGFIX: PSNR uses the squared peak value; the original divided by
    # max(x) unsquared, under-reporting by 10*log10(max(x)).
    snrval = 10*np.log10(np.max(x)**2/denom)
    return snrval
def embed(im, embedsize):
    '''
    Place a small image at the center of a larger zero-filled window.

    Inputs:
        im: Image to embed
        embedsize: (H, W) tuple of the target window size

    Outputs:
        Embedded image of shape embedsize with im's dtype
    '''
    h_small, w_small = im.shape
    h_big, w_big = embedsize
    top = (h_big - h_small) // 2
    left = (w_big - w_small) // 2
    out = np.zeros((h_big, w_big), dtype=im.dtype)
    out[top:top + h_small, left:left + w_small] = im
    return out
def measure(x, noise_snr=40, tau=100):
    ''' Realistic sensor measurement with readout and photon noise

    Inputs:
        x: Clean input signal (ndarray)
        noise_snr: Readout noise in electron count
        tau: Integration time. Poisson noise is created for x*tau.
            (Default is 100)

    Outputs:
        x_meas: x with added noise
    '''
    x_meas = np.copy(x)

    # readout noise: zero-mean Gaussian scaled to the requested SNR
    noise = pow(10, -noise_snr/20)*np.random.randn(x_meas.size).reshape(x_meas.shape)

    # First add photon noise, provided it is not infinity
    if tau != float('Inf'):
        x_meas = x_meas*tau

        # negative entries get a negated Poisson draw so np.random.poisson
        # never sees a negative rate
        x_meas[x > 0] = np.random.poisson(x_meas[x > 0])
        x_meas[x <= 0] = -np.random.poisson(-x_meas[x <= 0])

        x_meas = (x_meas + noise)/tau

    else:
        x_meas = x_meas + noise

    return x_meas
def grid_plot(imdata):
    '''
    Plot 3D set of images into a 2D grid using subplots.

    Inputs:
        imdata: N x H x W image stack

    Outputs:
        None (draws into the current matplotlib figure)
    '''
    N, H, W = imdata.shape

    # near-square layout: nrows*ncols >= N
    nrows = int(np.sqrt(N))
    ncols = int(np.ceil(N/nrows))

    for idx in range(N):
        plt.subplot(nrows, ncols, idx+1)
        plt.imshow(imdata[idx, :, :], cmap='gray')
        # hide tick labels so the grid reads as a montage
        plt.xticks([], [])
        plt.yticks([], [])
def build_montage(images):
    '''
    Tile a stack of images into a single 2D montage, row-major.

    Inputs:
        images: nimg x H x W stack

    Outputs:
        float32 montage of shape (H*nrows, W*ncols)
    '''
    nimg, H, W = images.shape
    nrows = int(np.ceil(np.sqrt(nimg)))
    ncols = int(np.ceil(nimg / nrows))

    montage = np.zeros((H * nrows, W * ncols), dtype=np.float32)

    for idx in range(nimg):
        row, col = divmod(idx, ncols)
        montage[row * H:(row + 1) * H, col * W:(col + 1) * W] = images[idx, ...]

    return montage
def ims2rgb(im1, im2):
    '''
    Pack two grayscale images into the red and blue channels of an RGB
    image for visual comparison (green channel stays zero).

    Inputs:
        im1, im2: Two images of identical shape to compare
    '''
    H, W = im1.shape
    rgb = np.zeros((H, W, 3))
    rgb[..., 0], rgb[..., 2] = im1, im2
    return rgb
def textfunc(im, txt):
    """Overlay txt on im at a fixed position (white text, OpenCV font).

    Returns the annotated image (cv2.putText also draws in place).
    """
    return cv2.putText(im, txt, (30, 30),
                       cv2.FONT_HERSHEY_SIMPLEX,
                       0.5,
                       (1, 1, 1),
                       2,
                       cv2.LINE_AA)
def get_img(imname, scaling):
    """Load data/<imname>.png, rescale it, and return one float32 channel.

    Grayscale inputs are expanded to 3 identical channels before channel 1
    is extracted, so the result is always the (possibly replicated) green
    channel as a contiguous float32 array.
    """
    # Read image
    im = cv2.resize(plt.imread('data/%s.png'%imname), None,
                    fx=scaling, fy=scaling)
    if im.ndim == 2:
        im = im[:, :, np.newaxis]
        im = im[:, :, [0, 0, 0]]
    im = np.copy(im, order='C')

    H, W, _ = im.shape

    return np.copy(im[..., 1], order='C').astype(np.float32)
def get_real_im(imname, camera):
    """Load a captured image stack from data/<camera>/<imname>.mat.

    Returns (im, minval, maxval) where im is normalized to [0, 1] but
    minval/maxval are the raw pre-normalization extremes, so callers can
    undo the normalization. RGB captures are additionally downsampled 2x.
    """
    im = io.loadmat('data/%s/%s.mat'%(camera, imname))['imstack']
    minval = im.min()
    maxval = im.max()

    if camera == 'rgb':
        im = normalize(im[:, ::2, ::2], True)
    else:
        im = normalize(im, True).astype(np.float32)

    return im, minval, maxval
def boxify(im, topleft, boxsize, color=[1, 1, 1], width=2):
    '''
    Draw a rectangular outline on an RGB image, in place.

    Inputs:
        im: (H, W, 3) image; modified in place
        topleft: (row, col) of the box's top-left corner
        boxsize: (dh, dw) height and width of the box
        color: RGB color of the outline
        width: outline thickness in pixels

    Outputs:
        im with the outline drawn (same array object)
    '''
    h, w = topleft
    dh, dw = boxsize
    im[h:h+dh+1, w:w+width, :] = color             # left edge
    # BUGFIX: horizontal edges previously spanned w:w+dh+width (using the
    # box *height* dh), which was wrong for non-square boxes.
    im[h:h+width, w:w+dw+width, :] = color         # top edge
    im[h:h+dh+1, w+dw:w+dw+width, :] = color       # right edge
    im[h+dh:h+dh+width, w:w+dw+width, :] = color   # bottom edge
    return im
def get_inp(tensize, const=10.0):
    '''
    Create a trainable random tensor on the GPU.

    Inputs:
        tensize: shape of the tensor
        const: random values in [0, 1) are divided by this factor

    Outputs:
        inp: torch.nn.Parameter on the GPU with requires_grad=True
    '''
    # torch.autograd.Variable is a deprecated no-op wrapper; nn.Parameter
    # already sets requires_grad=True, so wrap the scaled tensor directly.
    inp = torch.rand(tensize).cuda() / const
    return torch.nn.Parameter(inp)
| 6,530 | 21.996479 | 85 | py |
DeepIR | DeepIR-main/modules/dataset.py | #!/usr/bin/env python
import os
import sys
import tqdm
import pdb
import math
import configparser
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from PIL import Image
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import skimage
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cv2
def get_mgrid(sidelen, dim=2):
    '''Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.
    sidelen: int
    dim: int'''
    axes = [torch.linspace(-1, 1, steps=sidelen)] * dim
    grid = torch.stack(torch.meshgrid(*axes), dim=-1)
    return grid.reshape(-1, dim)
def xy_mgrid(H, W):
    '''
    Generate a flattened meshgrid for heterogenous sizes

    Inputs:
        H, W: Input dimensions

    Outputs:
        mgrid: H*W x 2 grid of (x, y) coordinates in [-1, 1], row-major
    '''
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, H),
                            torch.linspace(-1, 1, W))
    return torch.stack((xs, ys), dim=-1).reshape(-1, 2)
class ImageDataset(Dataset):
    """Single-image dataset: the one item is (flattened coords, flattened pixels).

    img is an (H, W, nchan) array; coordinates come from xy_mgrid.
    """
    def __init__(self, img):
        super().__init__()
        H, W, nchan = img.shape
        tensor_img = torch.tensor(img)[..., None]
        self.pixels = tensor_img.view(-1, nchan)
        self.coords = xy_mgrid(H, W)

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        if idx > 0:
            raise IndexError
        return self.coords, self.pixels
class Image3x3Dataset(Dataset):
    """Single-image dataset whose coordinates include one-pixel-shifted
    copies stacked side by side.

    NOTE(review): despite the class name, the shift loops cover only the
    2x2 set {0,1}x{0,1}, yielding 4 coordinate pairs (8 columns), not a
    3x3 neighborhood — confirm intent.
    """
    def __init__(self, img):
        super().__init__()
        H, W, nchan = img.shape
        img = torch.tensor(img)[..., None]
        self.pixels = img.view(-1, nchan)
        self.coords = xy_mgrid(H, W)

        # Stack coordinates in the 3x3 neighborhood
        coords_stack = []
        for xshift in [0, 1]:
            for yshift in [0, 1]:
                # shift is one pixel in normalized units (1/W, 1/H)
                # NOTE(review): adding a numpy array to a torch tensor and
                # then np.hstack-ing torch tensors relies on implicit
                # conversions — fragile across torch versions.
                shift_array = np.array([xshift/W, yshift/H]).reshape(1, 2)
                coords_stack.append(self.coords + shift_array)
        self.coords = np.hstack(coords_stack).astype(np.float32)

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        if idx > 0: raise IndexError
        return self.coords, self.pixels
class ImageFlowDataset(Dataset):
    """Two-image dataset (e.g. for flow estimation): the one item is
    (coords, pixels of img1, pixels of img2)."""
    def __init__(self, img1, img2):
        super().__init__()
        H, W = img1.shape
        self.pixels1 = torch.tensor(img1)[..., None].view(-1, 1)
        self.pixels2 = torch.tensor(img2)[..., None].view(-1, 1)
        self.coords = xy_mgrid(H, W)

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        if idx > 0:
            raise IndexError
        return self.coords, self.pixels1, self.pixels2
class ImageRegDataset(Dataset):
    """Image-stack dataset for registration: item = (1 x H x W tensor, index)."""
    def __init__(self, imstack):
        super().__init__()
        self.imstack = imstack
        self.nimg = imstack.shape[0]

    def __len__(self):
        return self.nimg

    def __getitem__(self, idx):
        frame = torch.tensor(self.imstack[idx, ...])[None, ...]
        return frame, idx
class ImageStackDataset(Dataset):
    """Image-stack dataset: item = (shared pixel coords, flattened frame idx)."""
    def __init__(self, imstack):
        super().__init__()
        self.imstack = imstack
        nimg, H, W = imstack.shape
        self.nimg = nimg
        self.coords = xy_mgrid(H, W)

    def __len__(self):
        return self.nimg

    def __getitem__(self, idx):
        frame = torch.tensor(self.imstack[idx, ...])
        pixels = frame[None, ...].permute(1, 2, 0).view(-1, 1)
        return self.coords, pixels
class ImageSRDataset(Dataset):
    """Super-resolution dataset over an image stack with per-frame warped
    coordinates (Xstack/Ystack) and optional per-frame masks.

    Each item returns (coords, pixels, mask[, idx]); missing coords/masks
    are returned as torch.zeros(1) placeholders.
    """
    def __init__(self, imstack, Xstack=None, Ystack=None, masks=None,
                 jitter=False, xjitter=None, yjitter=None, get_indices=False):
        super().__init__()
        self.imstack = imstack
        self.Xstack = Xstack
        self.Ystack = Ystack
        self.masks = masks
        # NOTE(review): self.jitter is stored but never read in __getitem__;
        # the default x/y jitter magnitudes are one pixel in normalized units
        self.jitter = jitter
        self.get_indices = get_indices
        self.nimg, self.H, self.W = imstack.shape

        if xjitter is None:
            self.xjitter = 1/self.W
            self.yjitter = 1/self.H
        else:
            self.xjitter = xjitter
            self.yjitter = yjitter

    def __len__(self):
        return self.nimg

    def __getitem__(self, idx):
        img = torch.tensor(self.imstack[idx, ...])

        # If Jitter is enabled, return stratified sampled coordinates
        pixels = img[None, ...].permute(1, 2, 0).view(-1, 1)

        if self.masks is not None:
            mask = torch.tensor(self.masks[idx, ...])
            mask = mask[None, ...].permute(1, 2, 0).view(-1, 1)
        else:
            mask = torch.zeros(1)

        if self.Xstack is not None:
            # per-frame warped sampling grid, flattened as (x, y) pairs
            coords = torch.stack((torch.tensor(self.Xstack[idx, ...]),
                                  torch.tensor(self.Ystack[idx, ...])),
                                 dim=-1).reshape(-1, 2)
        else:
            coords = torch.zeros(1)

        if self.get_indices:
            return coords, pixels, mask, idx
        else:
            return coords, pixels, mask
class ImageChunkDataset(Dataset):
    """Dataset that serves the image stack one spatial patch at a time.

    Each item returns (coords, pixels) where pixels contains the patch from
    every frame and coords is the patch meshgrid repeated per frame.
    """
    def __init__(self, imstack, patchsize):
        super().__init__()
        self.imstack = imstack
        self.nimg, self.H, self.W = imstack.shape
        self.patchsize = patchsize
        self.patch_coords = xy_mgrid(patchsize[0], patchsize[1])

        # number of patches along each axis (last patch is clamped to fit)
        self.nH = int(np.ceil(self.H/patchsize[0]))
        self.nW = int(np.ceil(self.W/patchsize[1]))

    def __len__(self):
        return (self.nH * self.nW)

    def __getitem__(self, idx):
        # NOTE(review): the linear index is decomposed with nH for both the
        # modulus and the division — for non-square tilings (nH != nW) this
        # looks inconsistent with the nH x nW patch count; confirm.
        w_idx = int(idx%self.nH)
        h_idx = int((idx - w_idx)//self.nH)

        h1 = h_idx*self.patchsize[0]
        h2 = h_idx*self.patchsize[0] + self.patchsize[0]

        w1 = w_idx*self.patchsize[1]
        w2 = w_idx*self.patchsize[1] + self.patchsize[1]

        # clamp the final row/column of patches inside the image
        if h2 > self.H:
            h1 = self.H - self.patchsize[0]
            h2 = self.H

        if w2 > self.W:
            w1 = self.W - self.patchsize[1]
            w2 = self.W

        img = torch.tensor(self.imstack[:, h1:h2, w1:w2])
        pixels = img.reshape(-1, 1)

        # NOTE(review): patch_coords is in normalized [-1, 1] units but the
        # offsets w1/h1 added below are in pixels — verify this mixing is
        # what the downstream model expects.
        coords = torch.clone(self.patch_coords)
        coords[:, 0] = coords[:, 0] + w1
        coords[:, 1] = coords[:, 1] + h1

        coords = torch.repeat_interleave(coords, self.nimg, 0)

        return coords, pixels
def load_config(configpath):
    '''
    Load an .ini configuration file into a flat dict.

    Values are coerced in order: 'True'/'False' -> bool, then int, then
    float, and anything else stays a string. Keys from all sections are
    merged into one dict (later sections overwrite earlier ones).
    '''
    parser = configparser.ConfigParser()
    parser.read(configpath)

    params_dict = dict()

    for section in parser.keys():
        for key in parser[section].keys():
            token = parser[section][key]
            if token == 'False':
                params_dict[key] = False
            elif token == 'True':
                params_dict[key] = True
            else:
                # BUGFIX: the old "'.' in token -> float(token)" heuristic
                # raised ValueError on string values containing dots
                # (e.g. file paths such as 'model.pth')
                try:
                    params_dict[key] = int(token)
                except ValueError:
                    try:
                        params_dict[key] = float(token)
                    except ValueError:
                        params_dict[key] = token

    return params_dict
DeepIR | DeepIR-main/modules/thermal.py | #!/usr/bin/env python
'''
Routines for dealing with thermal images
'''
import tqdm
import copy
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim_func
import torch
import kornia
import torch.nn.functional as F
import utils
import losses
import motion
import deep_prior
def get_metrics(gt, estim, pad=True):
    '''
    Compute image quality metrics between a ground truth and an estimate.

    Inputs:
        gt: Ground truth image
        estim: Estimated image
        pad: If True (and the image is large enough), crop a 20-pixel
            border before computing metrics

    Outputs:
        metrics: dictionary with the following fields:
            snrval: SNR of the reconstruction
            psnrval: Peak SNR
            ssimval: SSIM
    '''
    # Cropping a 20px border off a tiny image would leave nothing useful
    if min(gt.shape) < 50:
        pad = False
    if pad:
        gt = gt[20:-20, 20:-20]
        estim = estim[20:-20, 20:-20]
    metrics = {'snrval': utils.asnr(gt, estim),
               'psnrval': utils.asnr(gt, estim, compute_psnr=True),
               'ssimval': ssim_func(gt, estim)}
    return metrics
def create_fpn(imsize, vmin=0.9, vmax=1, method='col', rank=1):
    '''
    Generate fixed pattern noise for microbolometer-type sensors

    Inputs:
        imsize: (H, W) tuple
        vmin, vmax: Minimum and maximum value of gain
        method:
            'col' -- generate column only noise
            'both' -- generate rank-k noise
            'corr_col' -- correlated columns
            'corr_both' -- correlated rows and columns
        rank: if method is 'both' generate noise with this rank.

    Outputs:
        fpn: (H, W)-sized fixed pattern noise

    Raises:
        ValueError: if method is not one of the options above
    '''
    H, W = imsize
    if method == 'col':
        # Every row shares the same random per-column gain
        fpn = np.ones((H, 1)).dot(vmin + (vmax-vmin)*np.random.rand(1, W))
    elif method == 'both':
        # Sum of `rank` random rank-1 outer products, normalized by rank
        fpn = 0
        for idx in range(rank):
            col = vmin + (vmax - vmin)*np.random.rand(H, 1)
            row = vmin + (vmax - vmin)*np.random.rand(1, W)
            fpn += col.dot(row)
        fpn /= rank
    elif method == 'corr_col':
        # Smooth the per-column gains with a 5-tap moving average
        row = vmin + (vmax-vmin)*np.random.rand(W)
        row = np.convolve(row, np.ones(5)/5, mode='same')
        fpn = np.ones((H, 1)).dot(row.reshape(1, W))
    elif method == 'corr_both':
        # Smoothed gains along both rows and columns
        row = vmin + (vmax-vmin)*np.random.rand(W)
        row = np.convolve(row, np.ones(5)/5, mode='same')
        col = vmin + (vmax-vmin)*np.random.rand(H)
        col = np.convolve(col, np.ones(5)/5, mode='same')
        fpn = col.reshape(H, 1).dot(row.reshape(1, W))
    else:
        # Previously an unknown method fell through to `return fpn` and
        # raised a confusing NameError
        raise ValueError('Unknown FPN method: %s' % method)
    return fpn
def reg_avg_denoise(imstack, ecc_mats=None):
    '''
    Denoise a stack of images by registering every frame to the first
    one and averaging the warped results.

    Inputs:
        imstack: (nimg, H, W) stack of noisy images
        ecc_mats: Optional (nimg, 2, 3) affine registration matrices.
            If None, the stack is registered from scratch.

    Outputs:
        im_denoised: (H, W) denoised image
    '''
    _, H, W = imstack.shape
    # Register the stack if no transforms were supplied
    if ecc_mats is None:
        ecc_mats = motion.register_stack(imstack, (H, W))[:, :2, :]
    # Warp every frame back into the reference frame's coordinates
    inv_mats = motion.invert_regstack(ecc_mats)
    frames = torch.tensor(imstack.astype(np.float32))[:, None, ...]
    warps = torch.tensor(inv_mats.astype(np.float32))
    warped = kornia.geometry.warp_affine(frames, warps, (H, W),
                                         flags='bilinear')
    # Average, compensating border pixels covered by only some frames
    im_denoised = warped.mean(0)[0, ...].numpy()
    coverage = (warped > 0).type(torch.float32).mean(0)[0, ...].numpy()
    coverage[coverage == 0] = 1
    im_denoised /= coverage
    return im_denoised
def interp_DIP(imstack, reg_stack, hr_size, params_dict):
    '''
    Super resolve from a stack of images using deep image prior
    Inputs:
        imstack: (nimg, Hl, Wl) stack of low resolution images
        reg_stack: (nimg, 2, 3) stack of affine matrices
        hr_size: High resolution image size
        params_dict: Dictionary containing parameters for optimization
            input_type: Type of input
            input_depth: Depth of input data (number of channels)
            skip_n33d: Parameter for the neural network
            skip_n33u: Parameter for the neural network
            skip_n11: Parameter for the neural network
            num_scales: Parameter for the neural network
            upsample_mode: Parameter for the neural network
            niters: Number of DIP iterations
            learning_rate: Learning rate for optimization
            affine_learning_rate: Learning rate for registration refinement
            predmode: if 'combined', one 3-channel network predicts the
                image, gain, and offset; otherwise gain/offset are
                free-standing tensors optimized alongside the network
            integrator: 'area', 'learnable', or anything else for direct
                warping to low resolution
            mul_gain: If True, multiply simulated measurements by the gain
            add_offset: If True, add the offset map to the measurements
            fpn_vmin: Minimum value of fixed pattern noise gain
            prior_type: tv, or hessian
            lambda_prior: Prior weight
            optimize_reg: If True, optimize registration parameters
            visualize: If True, visualize reconstructions at each iteration
            gt: If visualize is true, gt is the ground truth image
            reg_final: If True, register the final result to gt
    Returns:
        im_hr: High resolution image
        profile: Dictionary containing the following:
            loss_array: Array with loss at each iteration
            trained_model: State dictionary for best model
            ecc_mats: final (possibly refined) affine matrices
            gain, offset: estimated per-pixel gain and offset maps
            metrics: if gt is provided, a dictionary with snrval,
                psnrval, and ssimval; None otherwise
    '''
    nimg, Hl, Wl = imstack.shape
    H, W = hr_size
    # Average of the two per-axis upsampling factors
    scale_sr = 0.5*(H/Hl + W/Wl)
    # Internal constant
    img_every = 2
    # Offset regularization is only active when a gain map is being fit
    if params_dict['mul_gain']:
        lambda_offset = 10
    else:
        lambda_offset = 0
    # Create loss functions
    criterion_fidelity = losses.L1Norm()
    criterion_offset = losses.TVNorm(mode='l2')
    if params_dict['prior_type'] == 'tv':
        criterion_prior = losses.TVNorm()
    elif params_dict['prior_type'] == 'hessian':
        criterion_prior = losses.HessianNorm()
    else:
        raise ValueError('Prior not implemented')
    # Create input
    model_input = deep_prior.get_noise(params_dict['input_depth'],
                                       params_dict['input_type'],
                                       (H, W)).cuda().detach()
    # Create the network -- 3 output channels (image, gain, offset) in
    # 'combined' mode, 1 (image only) otherwise
    if params_dict['predmode'] == 'combined':
        nchan = 3
    else:
        nchan = 1
    model = deep_prior.get_net(params_dict['input_depth'], 'skip',
                               'reflection', n_channels=nchan,
                               skip_n33d=params_dict['skip_n33d'],
                               skip_n33u=params_dict['skip_n33u'],
                               skip_n11=params_dict['skip_n11'],
                               num_scales=params_dict['num_scales'],
                               upsample_mode=params_dict['upsample_mode']
                               ).cuda()
    # Set it to training
    model.train()
    # Learnable integrator: a strided convolution initialized to box
    # averaging that maps high resolution to low resolution
    if params_dict['integrator'] == 'learnable':
        kernel_size = (int(scale_sr), int(scale_sr))
        integrator = torch.nn.Conv2d(1, 1, kernel_size=kernel_size,
                                     stride=int(scale_sr), bias=False).cuda()
        with torch.no_grad():
            integrator.weight.fill_(1.0/(scale_sr*scale_sr))
    # Create parameters from affine matrices
    affine_mat = torch.tensor(reg_stack).cuda()
    affine_var = torch.autograd.Variable(affine_mat, requires_grad=True).cuda()
    affine_param = torch.nn.Parameter(affine_var)
    # Create gain parameter
    # NOTE(review): vmin is read but never used below -- confirm intent
    vmin = params_dict['fpn_vmin']
    params = list(model.parameters())
    # In non-combined mode the gain and offset maps are free tensors
    if params_dict['predmode'] != 'combined':
        gain = torch.ones(1, 1, Hl, Wl).cuda()
        gain_var = torch.autograd.Variable(gain, requires_grad=True).cuda()
        gain_param = torch.nn.Parameter(gain_var)
        offset = torch.ones(1, 1, Hl, Wl).cuda()*1e-1
        offset_var = torch.autograd.Variable(offset, requires_grad=True).cuda()
        offset_param = torch.nn.Parameter(offset_var)
        params += [gain_param] + [offset_param]
    if params_dict['integrator'] == 'learnable':
        params += integrator.parameters()
    # Create an ADAM optimizer
    optimizer = torch.optim.Adam(lr=params_dict['learning_rate'],
                                 params=params)
    # Affine transform requires a separate optimizer
    reg_optimizer = torch.optim.Adam(lr=params_dict['affine_learning_rate'],
                                     params=[affine_param])
    loss_array = np.zeros(params_dict['niters'])
    best_loss = float('inf')
    best_state_dict = None
    # Bug fix: metrics was undefined when no ground truth was supplied,
    # which made the profile construction below raise a NameError
    metrics = None
    # We will just use all data
    gt = torch.tensor(imstack).cuda()[:, None, ...]
    for epoch in tqdm.tqdm(range(params_dict['niters'])):
        train_loss = 0
        img_and_gain = model(model_input)
        # Channel 0 is the image; channels 1/2 (combined mode) are the
        # gain and offset maps at high resolution
        img_hr = img_and_gain[:, [0], ...]
        if params_dict['predmode'] == 'combined':
            gain_param = img_and_gain[:, [1], ...]
            offset_param = img_and_gain[:, [2], ...]
        if scale_sr > 1:
            gain_param = F.interpolate(gain_param, (Hl, Wl))
            offset_param = F.interpolate(offset_param, (Hl, Wl))
        # Generate low resolution images
        img_hr_cat = torch.repeat_interleave(img_hr, nimg, 0)
        if params_dict['integrator'] == 'area':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=True)
            img_lr = F.interpolate(img_hr_affine, (Hl, Wl), mode='area')
        elif params_dict['integrator'] == 'learnable':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=True)
            img_lr = integrator(img_hr_affine)
        else:
            # Warp directly at low resolution with scaled translations
            img_lr = kornia.geometry.warp_affine(img_hr_cat,
                                                 affine_param/scale_sr,
                                                 (Hl, Wl), align_corners=False)
        # Multiply with the gain term; the mask excludes pixels that fell
        # outside the warped image
        mask = img_lr > 0
        if params_dict['add_offset']:
            img_lr = img_lr + offset_param
        if params_dict['mul_gain']:
            img_lr = gain_param * img_lr
        mse_loss = criterion_fidelity(img_lr*mask, gt*mask)
        prior_loss = params_dict['lambda_prior']*criterion_prior(img_hr)
        loss = mse_loss + prior_loss
        if params_dict['add_offset']:
            offset_loss = lambda_offset*criterion_offset(offset_param)
            loss = loss + offset_loss
        optimizer.zero_grad()
        if params_dict['optimize_reg']:
            reg_optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if params_dict['optimize_reg']:
            reg_optimizer.step()
        train_loss = loss.item()
        # Find if we have the best model so far
        if train_loss < best_loss:
            best_loss = train_loss
            best_state_dict = copy.deepcopy(model.state_dict())
        loss_array[epoch] = train_loss
        if params_dict['visualize']:
            if epoch%img_every == 0:
                with torch.no_grad():
                    img_hr_cpu = img_hr.cpu().detach().numpy().reshape(H, W)
                    # Show a randomly chosen low resolution view
                    v_idx = np.random.randint(nimg)
                    img_lr_cpu = img_lr[v_idx, ...].cpu().detach().reshape(Hl, Wl)
                    snrval = utils.asnr(params_dict['gt'], img_hr_cpu,
                                        compute_psnr=True)
                    ssimval = ssim_func(params_dict['gt'], img_hr_cpu)
                    txt = 'PSNR: %.1f | SSIM: %.2f'%(snrval, ssimval)
                    gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
                    offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
                    img_hr_ann = utils.textfunc(img_hr_cpu/img_hr_cpu.max(), txt)
                    imtop = np.hstack((imstack[v_idx, ...], img_lr_cpu.numpy()))
                    imbot = np.hstack((gain/gain.max(), offset/offset.max()))
                    imcat = np.vstack((imtop, imbot))
                    imcat_full = np.hstack((params_dict['gt'], img_hr_ann))
                    cv2.imshow('Recon LR', np.clip(imcat, 0, 1))
                    cv2.imshow('Recon HR', np.clip(imcat_full, 0, 1))
                    cv2.waitKey(1)
    # We are done, obtain the best model
    model.eval()
    with torch.no_grad():
        model.load_state_dict(best_state_dict)
        img_and_gain = model(model_input)
        img_hr = img_and_gain[[0], [0], ...].reshape(1, 1, H, W)
        # Warp the reconstruction into the first frame's coordinates
        img_hr = kornia.geometry.warp_affine(img_hr,
                                             affine_param[[0], ...], (H, W))
        img_hr = img_hr.cpu().detach().numpy().reshape(H, W)
        if params_dict['predmode'] == 'combined':
            gain_param = img_and_gain[0, 1, ...]
            offset_param = img_and_gain[0, 2, ...]
    # In case there's a shift in reconstruction, best-effort register it
    # to the ground truth; ECC may fail to converge
    if params_dict['reg_final'] and 'gt' in params_dict:
        try:
            img_hr = motion.ecc_flow(params_dict['gt'], img_hr)[1]
        except Exception:
            pass
    # If ground truth is provided, return metrics
    if 'gt' in params_dict:
        metrics = get_metrics(params_dict['gt'], img_hr)
    gain = gain_param.detach().cpu().numpy().reshape(Hl, Wl)
    offset = offset_param.detach().cpu().numpy().reshape(Hl, Wl)
    profile = {'loss_array': loss_array,
               'trained_model': best_state_dict,
               'metrics': metrics,
               'ecc_mats': affine_param.detach().cpu().numpy(),
               'gain': gain,
               'offset': offset}
    return img_hr, profile
def interp_convex(imstack, reg_stack, hr_size, params_dict):
    '''
    Super resolve from a stack of images using convex optimization
    Inputs:
        imstack: (nimg, Hl, Wl) stack of low resolution images
        reg_stack: (nimg, 2, 3) stack of affine matrices
        hr_size: High resolution image size
        params_dict: Dictionary containing parameters for optimization
            niters: Number of iterations
            learning_rate: Learning rate for optimization
            integrator: 'area' for area downsampling, anything else for
                direct warping to low resolution
            prior_type: tv, hessian, or l2
            lambda_prior: Prior weight
            optimize_reg: If True, optimize registration parameters
            visualize: If True, visualize reconstructions at each iteration
            gt: If visualize is true, gt is the ground truth image
            reg_final: If True, register the final result to gt
    Returns:
        im_hr: High resolution image
        profile: Dictionary containing the following:
            loss_array: Array with loss at each iteration
            gain, offset: estimated per-pixel gain and offset maps
            metrics: if gt is provided, a dictionary with snrval,
                psnrval, and ssimval; None otherwise
    '''
    nimg, Hl, Wl = imstack.shape
    H, W = hr_size
    # Average of the two per-axis upsampling factors
    scale_sr = 0.5*(H/Hl + W/Wl)
    # Internal constants
    img_every = 10
    # NOTE(review): lambda_offset is currently unused (the offset loss is
    # commented out below)
    lambda_offset = 10
    # Create loss functions
    criterion_fidelity = losses.L2Norm()
    if params_dict['prior_type'] == 'tv':
        criterion_prior = losses.TVNorm()
    elif params_dict['prior_type'] == 'hessian':
        criterion_prior = losses.HessianNorm()
    elif params_dict['prior_type'] == 'l2':
        criterion_prior = losses.L2Norm()
    else:
        raise ValueError('Prior not implemented')
    # Initialize solution with random values
    #im_init = torch.tensor(interp_SR(imstack, reg_stack, hr_size))
    im_init = torch.rand(H, W)
    gain_init = torch.rand(Hl, Wl)
    offset_init = torch.ones(Hl, Wl)*1e-2
    # Create the high resolution image variable
    img_hr_param = torch.autograd.Variable(im_init[None, None, ...],
                                           requires_grad=True).cuda()
    img_hr_param = torch.nn.Parameter(img_hr_param)
    # Create gain parameter
    gain_param = torch.autograd.Variable(gain_init[None, None, ...],
                                         requires_grad=True).cuda()
    gain_param = torch.nn.Parameter(gain_param)
    # Create offset parameter
    offset_param = torch.autograd.Variable(offset_init[None, None, ...],
                                           requires_grad=True).cuda()
    offset_param = torch.nn.Parameter(offset_param)
    # Create parameters from affine matrices
    affine_mat = torch.tensor(reg_stack).cuda()
    affine_var = torch.autograd.Variable(affine_mat, requires_grad=True).cuda()
    affine_param = torch.nn.Parameter(affine_var)
    params = [img_hr_param] + [gain_param] + [offset_param]
    if params_dict['optimize_reg']:
        params += [affine_param]
        #params += [angles_param] + [translations_param]
    # Create an ADAM optimizer
    optimizer = torch.optim.Adam(lr=params_dict['learning_rate'],
                                 params=params)
    loss_array = np.zeros(params_dict['niters'])
    # Bug fix: metrics was undefined when no ground truth was supplied,
    # which made the profile construction below raise a NameError
    metrics = None
    gt = torch.tensor(imstack).cuda()[:, None, ...]
    for epoch in tqdm.tqdm(range(params_dict['niters'])):
        # Generate low resolution images
        img_hr_cat = torch.repeat_interleave(img_hr_param, gt.shape[0], 0)
        if params_dict['integrator'] == 'area':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=False)
            img_lr = F.interpolate(img_hr_affine, (Hl, Wl), mode='area')
        else:
            # Warp directly at low resolution with scaled translations
            img_lr = kornia.geometry.warp_affine(img_hr_cat,
                                                 affine_param/scale_sr,
                                                 (Hl, Wl), align_corners=False)
        gain_cat = torch.repeat_interleave(gain_param, gt.shape[0], 0)
        #offset_cat = torch.repeat_interleave(offset_param, gt.shape[0], 0)
        masks = img_lr > 0
        #if epoch > params_dict['niters']:
        #    img_lr = img_lr*(gain_cat + offset_cat)
        #else:
        # The "gain" here is applied additively
        img_lr = img_lr + gain_cat
        mse_loss = criterion_fidelity(img_lr, gt)
        prior_loss = criterion_prior(img_hr_param)
        #offset_loss = criterion_prior(offset_param)
        loss = mse_loss + params_dict['lambda_prior']*prior_loss
        # lambda_offset*offset_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss = loss.item()
        loss_array[epoch] = train_loss
        if params_dict['visualize']:
            if epoch%img_every == 0:
                img_hr_cpu = img_hr_param.cpu().detach().numpy().reshape(H, W)
                # Show a randomly chosen low resolution view
                v_idx = np.random.randint(nimg)
                img_lr_cpu = img_lr[v_idx, ...]
                img_lr_cpu = img_lr_cpu.cpu().detach().numpy().reshape(Hl, Wl)
                gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
                offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
                snrval = utils.psnr(params_dict['gt'], img_hr_cpu)
                ssimval = ssim_func(params_dict['gt'], img_hr_cpu)
                txt = 'PSNR: %.1f | SSIM: %.2f'%(snrval, ssimval)
                img_hr_ann = utils.textfunc(img_hr_cpu, txt)
                imcat = np.hstack((imstack[v_idx, ...], img_lr_cpu,
                                   gain, offset/offset.max()))
                imcat_full = np.hstack((params_dict['gt'], img_hr_ann))
                cv2.imshow('Recon LR', np.clip(imcat, 0, 1))
                cv2.imshow('Recon HR', np.clip(imcat_full, 0, 1))
                cv2.waitKey(1)
    # We are done, extract the solution
    with torch.no_grad():
        # NOTE(review): the warped result below is immediately overwritten
        # by the raw parameter -- the warp has no effect here. Kept as-is
        # to preserve behavior; confirm whether the warped image was
        # intended (interp_DIP uses the warped version).
        img_hr = kornia.geometry.warp_affine(img_hr_param, affine_param[[0], ...],
                                             (H, W))
        img_hr = img_hr_param.cpu().detach().numpy().reshape(H, W)
        gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
        offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
    # In case there's a shift in reconstruction, best-effort register it
    # to the ground truth; ECC may fail to converge
    if params_dict['reg_final'] and 'gt' in params_dict:
        try:
            img_hr = motion.ecc_flow(params_dict['gt'], img_hr)[1]
        except Exception:
            pass
    # If ground truth is provided, return metrics
    if 'gt' in params_dict:
        metrics = get_metrics(params_dict['gt'], img_hr)
    profile = {'loss_array': loss_array, 'metrics': metrics,
               'gain': gain, 'offset': offset}
    return img_hr, profile
| 21,820 | 37.485009 | 84 | py |
DeepIR | DeepIR-main/modules/motion.py | #!/usr/bin/env python
'''
Subroutines for estimating motion between images
'''
import os
import sys
import tqdm
import pdb
import math
import numpy as np
from scipy import linalg
from scipy import interpolate
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from PIL import Image
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import kornia
from pystackreg import StackReg
import skimage
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cv2
import utils
import thermal
def xy_mgrid(H, W):
    '''
    Build a flattened (x, y) sampling grid spanning [-1, 1] x [-1, 1].

    Inputs:
        H, W: Grid dimensions

    Outputs:
        mgrid: (H*W, 2) tensor in raster order; column 0 holds x (fast
            axis), column 1 holds y (slow axis)
    '''
    # x cycles once per row; y is constant within a row
    xs = torch.linspace(-1, 1, W).repeat(H)
    ys = torch.linspace(-1, 1, H).repeat_interleave(W)
    return torch.stack((xs, ys), dim=-1)
def getEuclidianMatrix(theta, shift):
    '''
    Build a 2x3 Euclidean (rigid body) transformation matrix.

    Inputs:
        theta: Rotation angle in radians
        shift: (tx, ty) translation

    Outputs:
        2x3 numpy array [[cos, -sin, tx], [sin, cos, ty]]
    '''
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.array([[cos_t, -sin_t, shift[0]],
                     [sin_t, cos_t, shift[1]]])
def fb_flow(frame1, frame2):
    '''
    Estimate dense optical flow between two frames with the Farneback
    algorithm and warp the second frame onto the first.

    Inputs:
        frame1, frame2: (H, W) grayscale frames

    Outputs:
        frame2_warped: frame2 warped into frame1's coordinates
        flow: (H, W, 2) flow field
        rgb: HSV-coded flow visualization (hue=direction, value=magnitude)
        rgb_comp: red/blue overlay of frame1 and the warped frame2
    '''
    H, W = frame1.shape
    grid_y, grid_x = np.mgrid[:H, :W]
    flow = cv2.calcOpticalFlowFarneback(frame1, frame2, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    # Color-code the flow field for visualization
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros((H, W, 3), dtype=np.uint8)
    hsv[..., 0] = ang*180/np.pi/2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    # Pull frame2 back along the flow field
    frame2_warped = cv2.remap(frame2.astype(np.float32),
                              (grid_x + flow[..., 0]).astype(np.float32),
                              (grid_y + flow[..., 1]).astype(np.float32),
                              cv2.INTER_LINEAR)
    # Red channel: frame1; blue channel: warped frame2
    rgb_comp = np.zeros((H, W, 3))
    rgb_comp[..., 0] = frame1
    rgb_comp[..., 2] = frame2_warped
    return frame2_warped, flow, rgb, rgb_comp
def ecc_flow(im1, im2, warp_mode=cv2.MOTION_HOMOGRAPHY, niters=1000, eps=1e-8):
    '''
    Register images using Opencv intensity based image alignment approach.
    Inputs:
        im1, im2: Images to register. im2 will be registered to im1.
        warp_mode: One of cv2.MOTION_*** . Default is MOTION_HOMOGRAPHY
        niters: Number of ECC iterations
        eps: Stopping tolerance
    Outputs:
        warp_matrix: Warping matrix
        im2_aligned: Second image warped to first image's coordinates
        flow: Flow coordinates to go from im2 to im1, with displacements
            normalized so that the full image span equals 2
    Raises:
        cv2.error: if ECC fails to converge
    https://learnopencv.com/image-alignment-ecc-in-opencv-c-python/
    '''
    # Find size of image1
    sz = im1.shape
    # Define 2x3 or 3x3 matrices and initialize the matrix to identity
    if warp_mode == cv2.MOTION_HOMOGRAPHY :
        warp_matrix = np.eye(3, 3, dtype=np.float32)
    else :
        warp_matrix = np.eye(2, 3, dtype=np.float32)
    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, niters, eps)
    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_mode,
                                             criteria=criteria, inputMask=None,
                                             gaussFiltSize=5)
    if warp_mode == cv2.MOTION_HOMOGRAPHY :
        # Use warpPerspective for Homography
        flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP
        im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1],sz[0]),
                                          flags=flags)
    else :
        # Use warpAffine for Translation, Euclidean and Affine
        flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP
        im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]),
                                     flags=flags)
    # Create flow coordinates: homogeneous (x, y, 1) pixel grid
    Y, X = np.mgrid[:sz[0], :sz[1]]
    coords = np.ones((3, sz[0]*sz[1]))
    coords[0, :] = X.reshape(1, -1)
    coords[1, :] = Y.reshape(1, -1)
    # Map the pixel grid through the estimated transform
    coords_new = warp_matrix.dot(coords)
    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        # Perspective divide for homographies
        coords_new = coords_new[:2, :]/coords_new[2, :]
    flow = np.zeros((sz[0], sz[1], 2), dtype=np.float32)
    # Displacements, scaled so a full image span equals 2 (grid_sample
    # style normalized units)
    flow[..., 0] = (coords_new[0, :] - coords[0, :]).reshape(sz)*(2/sz[1])
    flow[..., 1] = (coords_new[1, :] - coords[1, :]).reshape(sz)*(2/sz[0])
    return warp_matrix, im2_aligned, flow
def get_SR_data(im, scale, nimg=10, config=None):
    '''
    Wrapper function to get real or simulation data
    Inputs:
        im: Image or image stack
        scale: Scale for resolution
        nimg: Number of images
        config: Dictionary containing the following fields:
            real: If True, treat im as a captured image stack
            get_gt: If True, and simulation is also True, return groundtruth
                registration matrices
            shift_max, theta_max: See get_imstack (shift is specified in
                low resolution pixels, angle in degrees)
            downsample: If True, the imstack will be a downsampled version of
                the data. Only applicable if simulation is false
            add_noise: If True, simulate photon/readout noise
            tau, noise_snr: For simulated data, these represent max. photon
                count, and readout noise
            add_fpn: If True, add fixed pattern noise to data
            fpn_vmin: Minimum value of fpn
            fpn_method: 'col' or 'both'
            fpn_rank: If 'both', how many patterns to add
    Outputs:
        im: Ground truth high resolution image. Only useful if simulation
            is true, or simulation is false, and downsample is true. Else
            it is just a nearest neighbor upsampling
        imstack: (nimg, Hl, Wl) stack of low resolution images
        ecc_mats: (nimg, 2, 3) affine matrices
    '''
    # Extract configuration values
    if config is None:
        # Defaults: noiseless simulation with modest motion
        simulation = True
        get_gt = False
        shift_max = 10
        theta_max = np.pi/12
        downsample = False
        add_noise = False
        tau = None
        noise_snr = None
        add_fpn = False
        fpn_vmin = 0.9
        fpn_method = 'col'
        fpn_rank = 1
    else:
        simulation = not config['real']
        get_gt = config['get_gt']
        # Shifts are given in low resolution pixels; scale to high res.
        # Angles are given in degrees; convert to radians.
        shift_max = config['shift_max']*scale
        theta_max = config['theta_max']*np.pi/180
        downsample = config['downsample']
        add_noise = config['add_noise']
        tau = config['tau']
        noise_snr = config['noise_snr']
        add_fpn = config['add_fpn']
        fpn_vmin = config['fpn_vmin']
        fpn_method = config['fpn_method']
        fpn_rank = config['fpn_rank']
    if simulation is True:
        H, W = im.shape
        # Synthesize randomly shifted/rotated low resolution images and
        # normalize both the stack and the reference to [0, 1]-ish range
        imstack, _, _, mats = get_imstack(im, scale, shift_max,
                                          theta_max, nimg)
        imstack /= im.max()
        im /= im.max()
        if add_noise:
            imstack = utils.measure(imstack, noise_snr, tau)
        if add_fpn:
            fpn = thermal.create_fpn(imstack.shape[1:], vmin=fpn_vmin,
                                     method=fpn_method, rank=fpn_rank)
            imstack = imstack*fpn[np.newaxis, ...]
        _, Hl, Wl = imstack.shape
        if get_gt:
            # Ground truth motion is known; invert it for registration
            ecc_mats = invert_regstack(mats)
        else:
            ecc_mats = register_stack(imstack, (Hl, Wl))[:, :2, :]
    else:
        _, H, W = im.shape
        imstack = np.copy(im[:nimg, ...], order='C')
        interp = cv2.INTER_AREA
        if downsample:
            # Downsampled captures become the measurements; the first
            # full resolution frame serves as ground truth
            imstack_lr = np.zeros((nimg, H//scale, W//scale))
            Hl, Wl = H//scale, W//scale
            for idx in range(nimg):
                imstack_lr[idx, ...] = cv2.resize(imstack[idx, ...],
                                                  (W//scale, H//scale),
                                                  interpolation=interp)
            im = imstack[0, ...]
            imstack = imstack_lr.astype(np.float32)
        else:
            # No ground truth: use an upsampled first frame as a stand-in
            im = cv2.resize(imstack[0, ...], (W*scale, H*scale))
            Hl, Wl = H, W
            H, W = Hl*scale, Wl*scale
        ecc_mats = register_stack(imstack, (Hl, Wl))[:, :2, :]
        imstack /= im.max()
        im /= im.max()
    return im, imstack, ecc_mats
def get_random_affine(nimg, shift_max=10, theta_max=np.pi/12, perspective=False):
    '''
    Draw a batch of random rigid body transformation matrices.

    Inputs:
        nimg: Number of matrices to draw
        shift_max: Translations are integers in [-shift_max, shift_max)
        theta_max: Rotations are uniform in [-theta_max, theta_max]
        perspective: If True return 3x3 homogeneous matrices, else 2x3

    Outputs:
        affine_mats: (nimg, 3, 3) or (nimg, 2, 3) array; the first
            matrix is always the identity (reference frame)
    '''
    shifts = np.random.randint(-shift_max, shift_max, size=[nimg, 2])
    thetas = (2*np.random.rand(nimg) - 1)*theta_max
    # The reference frame gets no motion
    shifts[0, ...] = 0
    thetas[0] = 0
    affine_mats = np.zeros((nimg, 3, 3))
    affine_mats[:, 2, 2] = 1.0
    for idx, (theta, shift) in enumerate(zip(thetas, shifts)):
        # Rigid body matrix [[cos, -sin, tx], [sin, cos, ty]]
        cos_t, sin_t = np.cos(theta), np.sin(theta)
        affine_mats[idx, :2, :] = [[cos_t, -sin_t, shift[0]],
                                   [sin_t, cos_t, shift[1]]]
    # Force the first matrix to an exact identity
    affine_mats[0] = np.eye(3)
    if perspective is False:
        affine_mats = affine_mats[:, :2, :]
    return affine_mats
def get_imstack(im, scale, shift_max=10, theta_max=np.pi/12, nshifts=5):
    '''
    Synthesize a stack of randomly shifted/rotated low resolution images.

    Inputs:
        im: (H, W) high resolution image
        scale: Downsampling factor (> 1)
        shift_max: Maximum absolute shift in pixels
        theta_max: Maximum absolute rotation angle (radians)
        nshifts: Number of images to generate

    Outputs:
        imstack: (nshifts, Hl, Wl) stack of low resolution images
        Xstack, Ystack: Per-image sampling coordinates normalized to [-1, 1]
        mats: (nshifts, 2, 3) ground-truth Euclidean matrices
    '''
    H, W = im.shape
    # Continuous shifts in [-shift_max, shift_max), uniform rotations
    shifts = -shift_max + 2*shift_max*np.random.rand(nshifts, 2)
    thetas = (2*np.random.rand(nshifts) - 1)*theta_max
    # First image is the unshifted reference
    shifts[0, :] = 0
    thetas[0] = 0
    Y, X = np.mgrid[:H, :W]
    # Probe resize to obtain the exact low resolution dimensions
    Hl, Wl = cv2.resize(im, None, fx=1/scale, fy=1/scale).shape
    imstack = np.zeros((nshifts, Hl, Wl), dtype=np.float32)
    Xstack = np.zeros_like(imstack)
    Ystack = np.zeros_like(imstack)
    mats = np.zeros((nshifts, 2, 3))
    # Homogeneous pixel coordinates of the high resolution grid
    coords = np.hstack((X.reshape(-1, 1), Y.reshape(-1, 1), np.ones((H*W, 1))))
    for idx in range(nshifts):
        mat = getEuclidianMatrix(thetas[idx], shifts[idx, :])
        mats[idx, ...] = mat
        # Transform the grid, then resample it at low resolution
        warped = mat.dot(coords.T).T
        Xnew = cv2.resize(warped[:, 0].reshape(H, W), (Wl, Hl),
                          interpolation=cv2.INTER_LINEAR)
        Ynew = cv2.resize(warped[:, 1].reshape(H, W), (Wl, Hl),
                          interpolation=cv2.INTER_LINEAR)
        imstack[idx, ...] = cv2.remap(im, Xnew.astype(np.float32),
                                      Ynew.astype(np.float32),
                                      cv2.INTER_LINEAR)
        # Store normalized coordinates for grid_sample-style consumers
        Xstack[idx, ...] = 2*Xnew/W - 1
        Ystack[idx, ...] = 2*Ynew/H - 1
    return imstack, Xstack, Ystack, mats
def get_downsampled_shifted_images(im, scale, shift_max=10,
                                   theta_max=np.pi/12, nshifts=5,
                                   perturb_coords=False):
    '''
    Obtain synthetically generated, low resolution images of im, with
    random shifts.
    Inputs:
        im: Input high resolution image
        scale: Downsampling factor (> 1)
        shift_max: Maximum absolute integer shift in pixels
        theta_max: Maximum angle of rotation
        nshifts: Number of shifted images to obtain
        perturb_coords: If True, perturb the coordinates to study the effect
            of erroneous registration
    Outputs:
        imstack: Stack of images
        Xstack, Ystack: Stacks of normalized (x, y) coordinates per image
        shifts: (nshifts, 2) applied shifts
        thetas: (nshifts,) applied rotation angles
    '''
    H, W = im.shape
    # Integer shifts and uniform random rotations
    shifts = np.random.randint(-shift_max, shift_max, size=[nshifts, 2])
    thetas = (2*np.random.rand(nshifts)-1)*theta_max
    Y, X = np.mgrid[:H, :W]
    # Probe resize to obtain the low resolution dimensions
    tmp = cv2.resize(im, None, fx=1/scale, fy=1/scale)
    Hl, Wl = tmp.shape
    imstack = np.zeros((nshifts, Hl, Wl), dtype=np.float32)
    Xstack = np.zeros_like(imstack)
    Ystack = np.zeros_like(imstack)
    # Ensure first shift and theta are zero
    shifts[0, :] = 0
    thetas[0] = 0
    for idx in range(nshifts):
        shift = shifts[idx, :]
        theta = thetas[idx]
        # Shift
        Xshifted = X - shift[1]
        Yshifted = Y - shift[0]
        # Rotate about the image center
        Xrot = (Xshifted-W/2)*np.cos(theta) - (Yshifted-H/2)*np.sin(theta) + W/2
        Yrot = (Xshifted-W/2)*np.sin(theta) + (Yshifted-H/2)*np.cos(theta) + H/2
        Xnew = cv2.resize(Xrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        Ynew = cv2.resize(Yrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        imstack[idx, ...] = cv2.remap(im, Xnew.astype(np.float32),
                                      Ynew.astype(np.float32), cv2.INTER_AREA)
        if perturb_coords:
            # Re-derive the stored coordinates with ~1% multiplicative
            # noise on shift and angle to emulate imperfect registration;
            # the rendered image above still uses the exact motion
            Xshifted = X - (1 + np.random.randn(1)*1e-2)*shift[1]
            Yshifted = Y - (1 + np.random.randn(1)*1e-2)*shift[0]
            theta = (1 + np.random.randn(1)*1e-2)*theta
            Xrot = (Xshifted-W/2)*np.cos(theta) -\
                   (Yshifted-H/2)*np.sin(theta) + W/2
            Yrot = (Xshifted-W/2)*np.sin(theta) +\
                   (Yshifted-H/2)*np.cos(theta) + H/2
            Xnew = cv2.resize(Xrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
            Ynew = cv2.resize(Yrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        # Stored coordinates are normalized to [-1, 1]
        Xstack[idx, ...] = 2*Xnew/W - 1
        Ystack[idx, ...] = 2*Ynew/H - 1
    return imstack, Xstack, Ystack, shifts, thetas
def register_stack(imstack, full_res, method=StackReg.RIGID_BODY):
    '''
    Register a stack of images against the first frame at full resolution.

    Inputs:
        imstack: (nimg, H, W) stack of images
        full_res: (Hr, Wr) resolution at which registration is performed
        method: pystackreg transformation model (default rigid body)

    Outputs:
        reg_mats: transformation matrices from pystackreg (homogeneous;
            callers typically keep the first two rows)
    '''
    nimg = imstack.shape[0]
    Hr, Wr = full_res
    # Upsample every frame to the target resolution before registering
    imstack_full = np.zeros((nimg, Hr, Wr))
    for idx, frame in enumerate(imstack):
        imstack_full[idx, ...] = cv2.resize(frame, (Wr, Hr),
                                            interpolation=cv2.INTER_AREA)
    # Register everything to the first frame
    return StackReg(method).register_stack(imstack_full, reference='first',
                                           verbose=True)
def invert_regstack(regstack):
    '''
    Invert a stack of 2x3 affine matrices.

    Inputs:
        regstack: (nimg, 2, 3) affine matrices

    Outputs:
        regstack_inv: (nimg, 2, 3) inverted affine matrices
    '''
    regstack_inv = np.zeros_like(regstack)
    bottom = np.array([[0.0, 0.0, 1.0]])
    for k, mat in enumerate(regstack):
        # Promote to 3x3 homogeneous form, invert, drop the last row
        full = np.vstack((mat, bottom))
        regstack_inv[k, ...] = linalg.inv(full)[:2, :]
    return regstack_inv
def mat2coords(reg_stack, full_res, low_res):
    '''
    Convert affine registration matrices to normalized sampling grids.

    Inputs:
        reg_stack: (nimg, 2, 3) registration matrices
        full_res: (H, W) full resolution grid size
        low_res: (Hl, Wl) output grid size

    Outputs:
        Xstack, Ystack: (nimg, Hl, Wl) coordinates in [-1, 1], obtained
            by pushing the full resolution grid through the inverse of
            each transform and downsampling the result
    '''
    nimg = reg_stack.shape[0]
    H, W = full_res
    Hl, Wl = low_res
    Y, X = np.mgrid[:H, :W]
    # Homogeneous pixel coordinates, one row per pixel
    coords = np.hstack((X.reshape(-1, 1), Y.reshape(-1, 1), np.ones((H*W, 1))))
    Xstack = np.zeros((nimg, Hl, Wl), dtype=np.float32)
    Ystack = np.zeros_like(Xstack)
    bottom = np.array([[0.0, 0.0, 1.0]])
    for idx in range(nimg):
        # Apply the inverse transform to the full resolution grid
        inv_mat = linalg.inv(np.vstack((reg_stack[idx, ...], bottom)))
        warped = inv_mat.dot(coords.T).T
        # Normalize to [-1, 1] and resample to the low resolution grid
        Xstack[idx, ...] = cv2.resize(2*warped[:, 0].reshape(H, W)/W - 1,
                                      (Wl, Hl), interpolation=cv2.INTER_AREA)
        Ystack[idx, ...] = cv2.resize(2*warped[:, 1].reshape(H, W)/H - 1,
                                      (Wl, Hl), interpolation=cv2.INTER_AREA)
    return Xstack, Ystack
def param2theta(params, w, h):
    '''
    Convert pixel-space affine matrices to the normalized form expected
    by torch.nn.functional.affine_grid / grid_sample.

    Inputs:
        params: (nimg, 2, 3) affine matrices in pixel coordinates
        w, h: Width and height of the image

    Outputs:
        theta: (nimg, 2, 3) matrices in normalized [-1, 1] coordinates

    Reference:
        https://discuss.pytorch.org/t/how-to-convert-an-affine-transform-matrix-into-theta-to-use-torch-nn-functional-affine-grid/24315/4
    '''
    bottom = np.zeros((1, 3), dtype=np.float32)
    bottom[0, 2] = 1
    theta = np.zeros_like(params)
    for idx in range(params.shape[0]):
        # Invert the homogeneous 3x3 version of the matrix
        inv = np.linalg.inv(np.vstack((params[idx, ...], bottom)))
        # Rescale rotation/shear by the aspect ratio and re-center the
        # translation into normalized coordinates
        theta[idx, 0, 0] = inv[0, 0]
        theta[idx, 0, 1] = inv[0, 1]*h/w
        theta[idx, 0, 2] = inv[0, 2]*2/w + theta[idx, 0, 0] + theta[idx, 0, 1] - 1
        theta[idx, 1, 0] = inv[1, 0]*w/h
        theta[idx, 1, 1] = inv[1, 1]
        theta[idx, 1, 2] = inv[1, 2]*2/h + theta[idx, 1, 0] + theta[idx, 1, 1] - 1
    return theta
def affine2rigid(mats):
    '''
    Extract rigid body parameters from affine matrices.

    Inputs:
        mats: (nmats, 2, 3) affine matrices

    Outputs:
        angles: (nmats,) rotation angles, recovered as arccos of the
            top-left entry. NOTE: arccos discards the sign of the
            rotation, so the returned angles are always in [0, pi].
        translations: (nmats, 2) translation vectors
    '''
    # A dead `if False:` branch that averaged four angle estimates has
    # been removed; behavior is unchanged.
    angles = np.arccos(mats[:, 0, 0])
    translations = mats[:, :, 2]
    return angles, translations
def get_transformed_coords(theta, imsize):
    '''
    Build flattened sampling grids for a batch of affine transforms.

    Inputs:
        theta: (B, 2, 3) affine matrices in normalized coordinates
        imsize: (H, W) image size

    Outputs:
        coords: (B, H*W, 2) sampling grid for each transform
    '''
    height, width = imsize
    batch = theta.shape[0]
    grid = F.affine_grid(theta, (batch, 1, height, width))
    return grid.reshape(-1, height*width, 2)
def interp_lr(imref, coords, renderer):
    '''
    Render low resolution views of a reference image at given coordinates.

    Inputs:
        imref: (1, 1, H, W) reference image
        coords: (B, H, W, 2) normalized sampling coordinates per view
        renderer: Object whose `integrator` callable downsamples a
            (B, 1, H, W) batch to low resolution
    Outputs:
        im_lr: (B, 1, Hl, Wl) low resolution transformed images
    '''
    nviews = coords.shape[0]
    # Sample one warped copy of the reference per coordinate grid
    batch = torch.repeat_interleave(imref, nviews, 0)
    warped = F.grid_sample(batch, coords, mode='bilinear',
                           align_corners=False)
    return renderer.integrator(warped)
def register_stack_ecc(imstack, full_res, method=cv2.MOTION_EUCLIDEAN):
    '''
    Register a stack of images and get coordinates
    Inputs:
        imstack: nimg x H x W stack of images
        full_res: Resolution at which images will be super resolved
        method: Method to use for ECC registration
    Outputs:
        Xstack: X Coordinates for registration
        Ystack: Y Coordinates for registration
        mask: (nimg, ) dimensional mask for images that were successfully
            registered
        alignment_err: (nimg, ) dimensional array of alignment error
    '''
    # NOTE(review): a fifth value, ecc_mats (nimg x 2 x 3, or nimg x 3 x 3
    # for homographies), is also returned between mask and alignment_err;
    # the docstring above omits it.
    nimg, H, W = imstack.shape
    Hr, Wr = full_res
    mask = np.zeros(nimg)
    alignment_err = np.zeros(nimg)
    # Per-frame sampling grids at the low resolution, stored in the
    # [-1, 1] normalized convention (see X/Y construction below).
    Xstack = np.zeros((nimg, H, W), dtype=np.float32)
    Ystack = np.zeros((nimg, H, W), dtype=np.float32)
    # The first frame, upsampled to full_res, is the registration reference.
    imref = cv2.resize(imstack[0, ...], (Wr, Hr),
                       interpolation=cv2.INTER_LINEAR)
    mask[0] = 1
    Y, X = np.mgrid[:Hr, :Wr]
    X = 2*X/Wr - 1
    Y = 2*Y/Hr - 1
    # Reference frame gets the identity grid.
    Xstack[0, ...] = cv2.resize(X, (W, H), interpolation=cv2.INTER_LINEAR)
    Ystack[0, ...] = cv2.resize(Y, (W, H), interpolation=cv2.INTER_LINEAR)
    if method == cv2.MOTION_HOMOGRAPHY:
        ecc_mats = np.zeros((nimg, 3, 3))
        ecc_mats[0, 2, 2] = 1
    else:
        ecc_mats = np.zeros((nimg, 2, 3))
    # First image is registered ... to itself
    ecc_mats[0, 0, 0] = 1
    ecc_mats[0, 1, 1] = 1
    for idx in tqdm.tqdm(range(1, nimg)):
        im2 = cv2.resize(imstack[idx, ...], (Wr, Hr),
                         interpolation=cv2.INTER_LINEAR)
        try:
            # ecc_flow is an external helper (not defined in this file);
            # presumably it returns the ECC warp matrix, the warped frame
            # and a dense flow field -- confirm against its definition.
            mat, im2_aligned, flow = ecc_flow(imref, im2, warp_mode=method)
            mask[idx] = 1
            ecc_mats[idx, :] = mat
            # Subtract the flow from the identity grid to obtain the
            # sampling coordinates for this frame.
            Xstack[idx, ...] = cv2.resize(X - flow[..., 0], (W, H),
                                          interpolation=cv2.INTER_LINEAR)
            Ystack[idx, ...] = cv2.resize(Y - flow[..., 1], (W, H),
                                          interpolation=cv2.INTER_LINEAR)
            # Only measure the alignment error where the warped frame has
            # valid (non-zero) content.
            spatial_mask = (im2_aligned != 0)
            alignment_err[idx] = abs((imref - im2_aligned)*spatial_mask).mean()
        except:
            # NOTE(review): bare except keeps the loop going when ECC fails
            # to converge, but it also hides unrelated errors -- consider
            # narrowing to cv2.error.
            mask[idx] = 0
            continue
    # Now return the coordinates
    return Xstack, Ystack, mask, ecc_mats, alignment_err
def prune_stack(imstack, ecc_mats, full_res, thres=None):
    '''
    Prune a stack of images which are not well registered.
    Inputs:
        imstack: nimg x H x W stack of images
        ecc_mats: nimg x 2 x 3 stack of transformation matrices
        full_res: Full resolution size
        thres: Threshold of registration error to consider when rejecting
            images. If None, 2*median(error_array) is used
    Outputs:
        imstack: nimg_good x H x W stack of good images
        ecc_mats: nimg_good x 2 x 3 stack of good transformation matrices
    '''
    # NOTE(review): two extra values are returned beyond the docstring (the
    # keep-mask and the per-frame difference images). Also, a None `thres`
    # falls back to the constant 1 here, not 2*median as the docstring
    # claims -- confirm which is intended.
    nimg, Hl, Wl = imstack.shape
    H, W = full_res
    if thres is None:
        thres = 1
    # Reference = first frame, upsampled to full resolution, replicated so
    # each frame can be warped by its own affine matrix on the GPU.
    imref = cv2.resize(imstack[0, ...], (W, H), interpolation=cv2.INTER_AREA)
    imten = torch.tensor(imref).cuda()[None, None, ...]
    imstack_ten = torch.tensor(imstack).cuda()[:, None, ...]
    imten = torch.repeat_interleave(imten, int(nimg), 0)
    mat = torch.tensor(ecc_mats.astype(np.float32)).cuda()
    # NOTE(review): kornia.warp_affine is presumably the old top-level
    # kornia API; newer releases expose it under kornia.geometry.transform.
    imtrans = kornia.warp_affine(imten, mat, (Hl, Wl))
    imdiff = abs(imtrans - imstack_ten).cpu()[:, 0, ...]
    # Normalized mean absolute difference per frame; the 1e-2*max term
    # guards against division by near-zero pixels.
    diff_array = (imdiff/(imstack + 1e-2*imstack.max())).mean(-1).mean(-1)
    mask = diff_array < thres
    # Keep only the frames whose error is below threshold.
    imstack = np.copy(imstack[mask == 1, ...], order='C')
    ecc_mats = np.copy(ecc_mats[mask == 1, ...], order='C')
    imdiff = imdiff[mask == 1, ...]
    return imstack, ecc_mats, mask, imdiff
def flow2rgb(flow):
    '''
    Visualize a dense (H, W, 2) flow field as an RGB image: hue encodes the
    flow direction, value encodes the min-max normalized magnitude, and
    saturation is fixed at full.
    '''
    height, width, _ = flow.shape
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros((height, width, 3), dtype=np.uint8)
    hsv[..., 0] = ang*180/np.pi/2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
DeepIR | DeepIR-main/modules/deep_prior.py | #!/usr/bin/env
'''
One single file for all things Deep Image Prior
'''
import os
import sys
import tqdm
import pdb
import numpy as np
import torch
from torch import nn
import torchvision
import cv2
from dmodels.skip import skip
from dmodels.texture_nets import get_texture_nets
from dmodels.resnet import ResNet
from dmodels.unet import UNet
class Downsampler(nn.Module):
    '''
    http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf

    Fixed (non-learnable) anti-aliased downsampling by `factor`, implemented
    as a strided nn.Conv2d whose weights hold a resampling kernel. Each of
    the n_planes channels is filtered independently (block-diagonal weights).
    '''
    def __init__(self, n_planes, factor, kernel_type, phase=0,
                 kernel_width=None, support=None, sigma=None,
                 preserve_size=False):
        # n_planes: channel count (filtered independently, no mixing)
        # factor: integer downsampling factor (becomes the conv stride)
        # kernel_type: named preset ('lanczos2', ...) or base type
        # phase: sample phase of the kernel, 0 or 0.5
        # preserve_size: replication-pad so output size is exactly H/factor
        super(Downsampler, self).__init__()
        assert phase in [0, 0.5], 'phase should be 0 or 0.5'
        # Named presets expand to a base kernel type + width/support/sigma.
        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1/2
            kernel_type_ = 'gauss'
        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1./np.sqrt(2)
            kernel_type_ = 'gauss'
        elif kernel_type in ['lanczos', 'gauss', 'box']:
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width,
                                 support=support, sigma=sigma)
        downsampler = nn.Conv2d(n_planes, n_planes,
                                kernel_size=self.kernel.shape,
                                stride=factor, padding=0)
        # Zero all weights and biases, then copy the kernel into each
        # channel's own diagonal slot so channels do not mix.
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch
        self.downsampler_ = downsampler
        if preserve_size:
            # Choose a symmetric replication pad so the strided conv keeps
            # the nominal H/factor output size.
            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) / 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) / 2.)
            self.padding = nn.ReplicationPad2d(pad)
        self.preserve_size = preserve_size

    def forward(self, input):
        if self.preserve_size:
            x = self.padding(input)
        else:
            x= input
        self.x = x  # last (padded) input kept for inspection/debugging
        return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width,
               support=None, sigma=None):
    '''
    Build a square 2D resampling kernel, normalized to unit sum.

    Inputs:
        factor: resampling factor (sets the lanczos lobe spacing)
        kernel_type: 'lanczos' | 'gauss' | 'box'
        phase: 0 or 0.5; half-phase kernels are one sample smaller
        kernel_width: nominal kernel size in samples
        support: lanczos support (number of lobes); required for 'lanczos'
        sigma: gaussian standard deviation; required for 'gauss'
    Outputs:
        (k, k) float numpy array summing to 1
    '''
    assert kernel_type in ['lanczos', 'gauss', 'box']
    # Half-phase kernels are one sample narrower (except box filters).
    if phase == 0.5 and kernel_type != 'box':
        kernel = np.zeros([kernel_width - 1, kernel_width - 1])
    else:
        kernel = np.zeros([kernel_width, kernel_width])
    if kernel_type == 'box':
        assert phase == 0.5, 'Box filter is always half-phased'
        kernel[:] = 1./(kernel_width * kernel_width)
    elif kernel_type == 'gauss':
        assert sigma, 'sigma is not specified'
        assert phase != 0.5, 'phase 1/2 for gauss not implemented'
        # FIX(review): removed a leftover debug print(center, kernel_width)
        # that spammed stdout on every construction.
        center = (kernel_width + 1.)/2.
        sigma_sq = sigma * sigma
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                di = (i - center)/2.
                dj = (j - center)/2.
                kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
                kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
    elif kernel_type == 'lanczos':
        assert support, 'support is not specified'
        center = (kernel_width + 1) / 2.
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                # Distance from the kernel center in units of `factor`.
                if phase == 0.5:
                    di = abs(i + 0.5 - center) / factor
                    dj = abs(j + 0.5 - center) / factor
                else:
                    di = abs(i - center) / factor
                    dj = abs(j - center) / factor
                # Separable sinc * sinc window; di == 0 / dj == 0 keep the
                # factor 1 to avoid 0/0. (Removed unused local pi_sq.)
                val = 1
                if di != 0:
                    val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
                    val = val / (np.pi * np.pi * di * di)
                if dj != 0:
                    val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
                    val = val / (np.pi * np.pi * dj * dj)
                kernel[i - 1][j - 1] = val
    else:
        assert False, 'wrong method name'
    kernel /= kernel.sum()
    return kernel
def get_noise(input_depth, method, spatial_size, noise_type='u', var=1./10):
    """Create a (1, input_depth, H, W) network-input tensor.

    method='noise' fills the tensor with random noise scaled by `var`
    (noise_type 'u' uniform / 'n' normal); method='meshgrid' requires
    input_depth == 2 and returns normalized x/y coordinate planes.

    Args:
        input_depth: number of channels in the tensor
        method: 'noise' or 'meshgrid'
        spatial_size: int or (H, W) spatial size
        noise_type: 'u' for uniform; 'n' for normal
        var: standard deviation scaler applied to the noise
    """
    if isinstance(spatial_size, int):
        spatial_size = (spatial_size, spatial_size)
    if method == 'noise':
        net_input = torch.zeros(1, input_depth, spatial_size[0], spatial_size[1])
        fill_noise(net_input, noise_type)
        net_input *= var
    elif method == 'meshgrid':
        assert input_depth == 2
        xs = np.arange(0, spatial_size[1]) / float(spatial_size[1] - 1)
        ys = np.arange(0, spatial_size[0]) / float(spatial_size[0] - 1)
        X, Y = np.meshgrid(xs, ys)
        meshgrid = np.concatenate([X[None, :], Y[None, :]])
        net_input = np_to_torch(meshgrid)
    else:
        assert False
    return net_input
def np_to_torch(img_np):
    '''Convert a (C, H, W) [0..1] numpy image to a (1, C, H, W) torch tensor.'''
    return torch.from_numpy(img_np).unsqueeze(0)
def torch_to_np(img_var):
    '''Convert a (1, C, H, W) torch tensor to a (C, H, W) numpy array.'''
    detached = img_var.detach().cpu()
    return detached.numpy()[0]
def fill_noise(x, noise_type):
    """Fill tensor `x` in-place: 'u' -> uniform[0, 1), 'n' -> standard normal."""
    if noise_type == 'n':
        x.normal_()
    elif noise_type == 'u':
        x.uniform_()
    else:
        assert False
def get_image_grid(images_np, nrow=8):
    '''Concatenate a list of CxHxW numpy images into one grid image (numpy).'''
    as_tensors = [torch.from_numpy(img) for img in images_np]
    grid = torchvision.utils.make_grid(as_tensors, nrow)
    return grid.numpy()
def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3,
            act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4,
            num_scales=5, downsample_mode='stride'):
    """Factory for the deep-image-prior backbone networks.

    Args:
        input_depth: channels of the network input tensor
        NET_TYPE: 'ResNet' | 'skip' | 'texture_nets' | 'UNet' | 'identity'
        pad, upsample_mode, downsample_mode, act_fun: forwarded to the nets
        n_channels: output channels (used by the 'skip' net)
        skip_n33d/skip_n33u/skip_n11, num_scales: 'skip' net widths; an int
            is broadcast to one value per scale
    Returns the constructed nn.Module.
    """
    if NET_TYPE == 'ResNet':
        # FIX(review): the previous positional call
        # ResNet(input_depth, 3, 10, 16, 1, nn.BatchNorm2d, False) bound
        # nn.BatchNorm2d to act_fun, so construction always raised TypeError
        # inside act(). Bind the intended parameters by keyword instead.
        net = ResNet(input_depth, 3, num_blocks=10, num_channels=16,
                     need_residual=True, act_fun=act_fun,
                     need_sigmoid=False, norm_layer=nn.BatchNorm2d)
    elif NET_TYPE == 'skip':
        net = skip(input_depth, n_channels,
                   num_channels_down=[skip_n33d]*num_scales if isinstance(skip_n33d, int) else skip_n33d,
                   num_channels_up=[skip_n33u]*num_scales if isinstance(skip_n33u, int) else skip_n33u,
                   num_channels_skip=[skip_n11]*num_scales if isinstance(skip_n11, int) else skip_n11,
                   upsample_mode=upsample_mode, downsample_mode=downsample_mode,
                   need_sigmoid=True, need_bias=True, pad=pad, act_fun=act_fun)
    elif NET_TYPE == 'texture_nets':
        net = get_texture_nets(inp=input_depth, ratios=[32, 16, 8, 4, 2, 1],
                               fill_noise=False, pad=pad)
    elif NET_TYPE == 'UNet':
        net = UNet(num_input_channels=input_depth, num_output_channels=3,
                   feature_scale=4, more_layers=0, concat_x=False,
                   upsample_mode=upsample_mode, pad=pad,
                   norm_layer=nn.BatchNorm2d, need_sigmoid=True, need_bias=True)
    elif NET_TYPE == 'identity':
        assert input_depth == 3
        net = nn.Sequential()
    else:
        assert False
    return net
DeepIR | DeepIR-main/modules/dmodels/skip.py | import torch
import torch.nn as nn
from .common import *
def skip(
        num_input_channels=2, num_output_channels=3,
        num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],
        filter_size_down=3, filter_size_up=3, filter_skip_size=1,
        need_sigmoid=True, need_bias=True,
        pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',
        need1x1_up=True):
    """Assembles encoder-decoder with skip connections.

    Arguments:
        act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
        pad (string): zero|reflection (default: 'zero')
        upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
        downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')

    The network is built outside-in: at every scale a `deeper` branch
    (downsample, convs, recurse, upsample) and an optional 1x1 `skip`
    branch are run in parallel, concatenated channel-wise, and fused by
    convolutions. NOTE(review): the list default arguments are shared
    across calls; they are never mutated here, so this is safe, but be
    careful when editing.
    """
    assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
    n_scales = len(num_channels_down)
    # Broadcast scalar settings to one entry per scale.
    if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :
        upsample_mode = [upsample_mode]*n_scales
    if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):
        downsample_mode = [downsample_mode]*n_scales
    if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :
        filter_size_down = [filter_size_down]*n_scales
    if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :
        filter_size_up = [filter_size_up]*n_scales
    last_scale = n_scales - 1
    cur_depth = None
    model = nn.Sequential()
    model_tmp = model  # container currently being filled at this scale
    input_depth = num_input_channels
    for i in range(len(num_channels_down)):
        deeper = nn.Sequential()
        skip = nn.Sequential()
        # Run the skip and deeper branches in parallel, concatenated on the
        # channel axis (Concat center-crops to the smaller spatial size).
        if num_channels_skip[i] != 0:
            model_tmp.add(Concat(1, skip, deeper))
        else:
            model_tmp.add(deeper)
        model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
        if num_channels_skip[i] != 0:
            skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
            skip.add(bn(num_channels_skip[i]))
            skip.add(act(act_fun))
        # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))
        # Strided conv performs the downsampling for this scale.
        deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))
        deeper.add(bn(num_channels_down[i]))
        deeper.add(act(act_fun))
        deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
        deeper.add(bn(num_channels_down[i]))
        deeper.add(act(act_fun))
        deeper_main = nn.Sequential()
        if i == len(num_channels_down) - 1:
            # The deepest
            k = num_channels_down[i]
        else:
            deeper.add(deeper_main)  # recursion point: next iteration fills this
            k = num_channels_up[i + 1]
        deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))
        # Fuse the concatenated skip + deeper features.
        model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
        model_tmp.add(bn(num_channels_up[i]))
        model_tmp.add(act(act_fun))
        if need1x1_up:
            model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
            model_tmp.add(bn(num_channels_up[i]))
            model_tmp.add(act(act_fun))
        input_depth = num_channels_down[i]
        model_tmp = deeper_main
    model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
| 3,744 | 36.079208 | 144 | py |
DeepIR | DeepIR-main/modules/dmodels/resnet.py | import torch
import torch.nn as nn
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
import torch.nn.init
from .common import *
class ResidualSequential(nn.Sequential):
    """nn.Sequential that adds the (center-cropped) input back onto the
    output, forming a residual block."""

    def __init__(self, *args):
        super(ResidualSequential, self).__init__(*args)

    def forward(self, x):
        out = super(ResidualSequential, self).forward(x)
        if out.size(2) != x.size(2) or out.size(3) != x.size(3):
            # Center-crop the input to the (smaller) output size.
            # FIX(review): slice offsets must be integers -- the original
            # used `diff2 / 2` (a float on Python 3) directly as a slice
            # bound, which raises TypeError. diff2/diff3 here are already
            # the half-differences, i.e. the symmetric crop offsets.
            diff2 = (x.size(2) - out.size(2)) // 2
            diff3 = (x.size(3) - out.size(3)) // 2
            x_ = x[:, :, diff2:out.size(2) + diff2, diff3:out.size(3) + diff3]
        else:
            x_ = x
        return out + x_

    def eval(self):
        # FIX(review): the original eval() printed a debug marker, called
        # eval() on every submodule (including self, recursing) and then
        # exit()-ed the interpreter -- leftover debugging code. Restore the
        # standard nn.Module contract instead.
        return super(ResidualSequential, self).eval()
def get_block(num_channels, norm_layer, act_fun):
    """Layer list for one residual body: conv -> norm -> act -> conv -> norm,
    all at constant channel width."""
    return [
        nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
        norm_layer(num_channels, affine=True),
        act(act_fun),
        nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
        norm_layer(num_channels, affine=True),
    ]
class ResNet(nn.Module):
    def __init__(self, num_input_channels, num_output_channels, num_blocks, num_channels, need_residual=True, act_fun='LeakyReLU', need_sigmoid=True, norm_layer=nn.BatchNorm2d, pad='reflection'):
        '''
        Image-to-image ResNet: stem conv + activation, `num_blocks` residual
        (or plain sequential) conv blocks at constant width, then a conv +
        norm tail and a final conv + Sigmoid head.

        pad = 'start|zero|replication'
        '''
        # NOTE(review): need_sigmoid is accepted but never used -- the final
        # Sigmoid is always appended below. Confirm whether that is intended.
        super(ResNet, self).__init__()
        # Residual wrapping is optional; plain nn.Sequential otherwise.
        if need_residual:
            s = ResidualSequential
        else:
            s = nn.Sequential
        stride = 1
        # First layers
        layers = [
            # nn.ReplicationPad2d(num_blocks * 2 * stride + 3),
            conv(num_input_channels, num_channels, 3, stride=1, bias=True, pad=pad),
            act(act_fun)
        ]
        # Residual blocks
        # layers_residual = []
        for i in range(num_blocks):
            layers += [s(*get_block(num_channels, norm_layer, act_fun))]
        layers += [
            nn.Conv2d(num_channels, num_channels, 3, 1, 1),
            norm_layer(num_channels, affine=True)
        ]
        # if need_residual:
        #    layers += [ResidualSequential(*layers_residual)]
        # else:
        #    layers += [Sequential(*layers_residual)]
        # if factor >= 2:
        #     # Do upsampling if needed
        #     layers += [
        #         nn.Conv2d(num_channels, num_channels *
        #                   factor ** 2, 3, 1),
        #         nn.PixelShuffle(factor),
        #         act(act_fun)
        #     ]
        layers += [
            conv(num_channels, num_output_channels, 3, 1, bias=True, pad=pad),
            nn.Sigmoid()
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)

    def eval(self):
        # Delegates eval-mode switching to the wrapped sequential model.
        self.model.eval()
| 2,943 | 29.350515 | 195 | py |
DeepIR | DeepIR-main/modules/dmodels/downsampler.py | import numpy as np
import torch
import torch.nn as nn
class Downsampler(nn.Module):
    '''
    http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf

    Fixed (non-learnable) anti-aliased downsampling by `factor`: a strided
    nn.Conv2d whose weights hold a resampling kernel, filtering each of the
    n_planes channels independently.
    '''
    def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
        # phase: sample phase of the kernel, 0 or 0.5
        # preserve_size: replication-pad so output size is exactly H/factor
        super(Downsampler, self).__init__()
        assert phase in [0, 0.5], 'phase should be 0 or 0.5'
        # Named presets expand to a base kernel type + width/support/sigma.
        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1/2
            kernel_type_ = 'gauss'
        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1./np.sqrt(2)
            kernel_type_ = 'gauss'
        elif kernel_type in ['lanczos', 'gauss', 'box']:
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
        downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
        # Zero all weights/biases, then place the kernel on each channel's
        # diagonal slot so channels do not mix.
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch
        self.downsampler_ = downsampler
        if preserve_size:
            # Symmetric replication pad keeping the nominal H/factor size.
            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) / 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) / 2.)
            self.padding = nn.ReplicationPad2d(pad)
        self.preserve_size = preserve_size

    def forward(self, input):
        if self.preserve_size:
            x = self.padding(input)
        else:
            x= input
        self.x = x  # last (padded) input kept for inspection/debugging
        return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
    '''
    Build a square 2D resampling kernel, normalized to unit sum.

    Inputs:
        factor: resampling factor (sets the lanczos lobe spacing)
        kernel_type: 'lanczos' | 'gauss' | 'box'
        phase: 0 or 0.5; half-phase kernels are one sample smaller
        kernel_width: nominal kernel size in samples
        support: lanczos support (lobes); required for 'lanczos'
        sigma: gaussian standard deviation; required for 'gauss'
    Outputs:
        (k, k) float numpy array summing to 1
    '''
    assert kernel_type in ['lanczos', 'gauss', 'box']
    # Half-phase kernels are one sample narrower (except box filters).
    if phase == 0.5 and kernel_type != 'box':
        kernel = np.zeros([kernel_width - 1, kernel_width - 1])
    else:
        kernel = np.zeros([kernel_width, kernel_width])
    if kernel_type == 'box':
        assert phase == 0.5, 'Box filter is always half-phased'
        kernel[:] = 1./(kernel_width * kernel_width)
    elif kernel_type == 'gauss':
        assert sigma, 'sigma is not specified'
        assert phase != 0.5, 'phase 1/2 for gauss not implemented'
        # FIX(review): removed a leftover debug print(center, kernel_width).
        center = (kernel_width + 1.)/2.
        sigma_sq = sigma * sigma
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                di = (i - center)/2.
                dj = (j - center)/2.
                kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
                kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
    elif kernel_type == 'lanczos':
        assert support, 'support is not specified'
        center = (kernel_width + 1) / 2.
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                # Distance from the kernel center in units of `factor`.
                if phase == 0.5:
                    di = abs(i + 0.5 - center) / factor
                    dj = abs(j + 0.5 - center) / factor
                else:
                    di = abs(i - center) / factor
                    dj = abs(j - center) / factor
                # Separable windowed sinc; zero distance keeps factor 1 to
                # avoid 0/0. (Removed unused local pi_sq.)
                val = 1
                if di != 0:
                    val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
                    val = val / (np.pi * np.pi * di * di)
                if dj != 0:
                    val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
                    val = val / (np.pi * np.pi * dj * dj)
                kernel[i - 1][j - 1] = val
    else:
        assert False, 'wrong method name'
    kernel /= kernel.sum()
    return kernel
#a = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2', phase='1', preserve_size=True)
#################
# Learnable downsampler
# KS = 32
# dow = nn.Sequential(nn.ReplicationPad2d(int((KS - factor) / 2.)), nn.Conv2d(1,1,KS,factor))
# class Apply(nn.Module):
# def __init__(self, what, dim, *args):
# super(Apply, self).__init__()
# self.dim = dim
# self.what = what
# def forward(self, input):
# inputs = []
# for i in range(input.size(self.dim)):
# inputs.append(self.what(input.narrow(self.dim, i, 1)))
# return torch.cat(inputs, dim=self.dim)
# def __len__(self):
# return len(self._modules)
# downs = Apply(dow, 1)
# downs.type(dtype)(net_input.type(dtype)).size()
| 5,379 | 30.83432 | 129 | py |
DeepIR | DeepIR-main/modules/dmodels/dcgan.py | import torch
import torch.nn as nn
def dcgan(inp=2,
          ndf=32,
          num_ups=4, need_sigmoid=True, need_bias=True, pad='zero',
          upsample_mode='nearest', need_convT=True):
    """Build a small DCGAN-style generator: a stem ConvTranspose2d, then
    num_ups-3 x2-upsampling stages, then a head producing 3 channels
    (optionally through a Sigmoid).

    need_convT selects transposed convs vs. Upsample+Conv2d stages.
    """
    # FIX(review): the original used nn.LeakyReLU(True), which binds True to
    # negative_slope (slope 1.0, i.e. a no-op activation). Use the 0.2 slope
    # + inplace=True convention from common.act() instead.
    layers = [nn.ConvTranspose2d(inp, ndf, kernel_size=3, stride=1, padding=0, bias=False),
              nn.BatchNorm2d(ndf),
              nn.LeakyReLU(0.2, True)]
    for i in range(num_ups - 3):
        if need_convT:
            layers += [nn.ConvTranspose2d(ndf, ndf, kernel_size=4, stride=2, padding=1, bias=False),
                       nn.BatchNorm2d(ndf),
                       nn.LeakyReLU(0.2, True)]
        else:
            layers += [nn.Upsample(scale_factor=2, mode=upsample_mode),
                       nn.Conv2d(ndf, ndf, kernel_size=3, stride=1, padding=1, bias=False),
                       nn.BatchNorm2d(ndf),
                       nn.LeakyReLU(0.2, True)]
    if need_convT:
        layers += [nn.ConvTranspose2d(ndf, 3, 4, 2, 1, bias=False)]
    else:
        # FIX(review): honor upsample_mode here too (was hard-coded 'bilinear').
        layers += [nn.Upsample(scale_factor=2, mode=upsample_mode),
                   nn.Conv2d(ndf, 3, kernel_size=3, stride=1, padding=1, bias=False)]
    if need_sigmoid:
        layers += [nn.Sigmoid()]
    return nn.Sequential(*layers)
DeepIR | DeepIR-main/modules/dmodels/texture_nets.py | import torch
import torch.nn as nn
from .common import *
normalization = nn.BatchNorm2d
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero'):
    """'Same'-padded convolution. pad='zero' uses Conv2d's built-in padding;
    pad='reflection' prepends an explicit ReflectionPad2d and disables the
    conv's own padding.

    FIX(review): the padding amount must be an int -- (kernel_size - 1) / 2
    is a float on Python 3 and broke both nn.Conv2d and nn.ReflectionPad2d.
    """
    to_pad = (kernel_size - 1) // 2
    if pad == 'zero':
        return nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
    elif pad == 'reflection':
        layers = [nn.ReflectionPad2d(to_pad),
                  nn.Conv2d(in_f, out_f, kernel_size, stride, padding=0, bias=bias)]
        return nn.Sequential(*layers)
def get_texture_nets(inp=3, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False, pad='zero', need_sigmoid=False, conv_num=8, upsample_mode='nearest'):
    '''
    Build a multi-scale texture-networks style generator.

    For each downsampling ratio (coarsest first) a small conv branch
    processes an AvgPool'd copy of the input; branches are merged
    coarsest-to-finest by upsampling x2 and concatenating with the next
    finer branch at every step, growing the width by conv_num per level.

    Inputs:
        inp: input channel count
        ratios: pyramid of AvgPool factors, coarsest first
        fill_noise: if True, replace each branch's input with noise (GenNoise)
        conv_num: channels contributed per pyramid level
    Returns the assembled model (nn.Sequential built via the Module.add
    monkey-patch from common.py; `conv` here is this module's own helper).
    '''
    for i in range(len(ratios)):
        j = i + 1
        # Per-scale branch: pool to this scale, then conv/norm/act x3.
        seq = nn.Sequential()
        tmp = nn.AvgPool2d(ratios[i], ratios[i])
        seq.add(tmp)
        if fill_noise:
            seq.add(GenNoise(inp))
        seq.add(conv(inp, conv_num, 3, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        seq.add(conv(conv_num, conv_num, 3, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        seq.add(conv(conv_num, conv_num, 1, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        if i == 0:
            # Coarsest scale: just upsample toward the next finer level.
            seq.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
            cur = seq
        else:
            # Merge everything built so far (cur_temp) with this branch.
            cur_temp = cur
            cur = nn.Sequential()
            # Batch norm before merging
            seq.add(normalization(conv_num))
            cur_temp.add(normalization(conv_num * (j - 1)))
            cur.add(Concat(1, cur_temp, seq))
            # Fuse the concatenated features at width conv_num * j.
            cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            cur.add(conv(conv_num * j, conv_num * j, 1, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            if i == len(ratios) - 1:
                # Finest scale: project to 3 output channels.
                cur.add(conv(conv_num * j, 3, 1, pad=pad))
            else:
                cur.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
    model = cur
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
| 2,315 | 27.95 | 146 | py |
DeepIR | DeepIR-main/modules/dmodels/common.py | import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler
def add_module(self, module):
    """Append *module* under an auto-generated 1-based numeric name."""
    self.add_module(str(len(self) + 1), module)

# Expose as nn.Module.add so containers can be grown with model.add(layer).
torch.nn.Module.add = add_module
class Concat(nn.Module):
    """Apply every sub-module to the same input and concatenate the results
    along `dim`, center-cropping to the smallest spatial size when the
    branch outputs disagree in H or W."""

    def __init__(self, dim, *args):
        super(Concat, self).__init__()
        self.dim = dim
        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def forward(self, input):
        outputs = [branch(input) for branch in self._modules.values()]
        heights = [o.shape[2] for o in outputs]
        widths = [o.shape[3] for o in outputs]
        min_h = min(heights)
        min_w = min(widths)
        if all(h == min_h for h in heights) and all(w == min_w for w in widths):
            cropped = outputs
        else:
            # Symmetric center-crop of each branch to the smallest size.
            cropped = []
            for o in outputs:
                off_h = (o.size(2) - min_h) // 2
                off_w = (o.size(3) - min_w) // 2
                cropped.append(o[:, :, off_h:off_h + min_h, off_w:off_w + min_w])
        return torch.cat(cropped, dim=self.dim)

    def __len__(self):
        return len(self._modules)
class GenNoise(nn.Module):
    """Ignore the input's content and emit standard-normal noise with the
    same batch/spatial shape but `dim2` channels."""

    def __init__(self, dim2):
        super(GenNoise, self).__init__()
        self.dim2 = dim2

    def forward(self, input):
        shape = list(input.size())
        shape[1] = self.dim2
        noise = torch.zeros(shape).type_as(input.data)
        noise.normal_()
        return torch.autograd.Variable(noise)
class Swish(nn.Module):
    """Swish activation, x * sigmoid(x) (https://arxiv.org/abs/1710.05941)."""

    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        return x * self.s(x)
def act(act_fun='LeakyReLU'):
    '''
    Build an activation module. `act_fun` is either one of the strings
    'LeakyReLU' | 'Swish' | 'ELU' | 'none', or a module class (e.g. nn.ReLU)
    which is instantiated with no arguments.
    '''
    if not isinstance(act_fun, str):
        return act_fun()
    if act_fun == 'LeakyReLU':
        return nn.LeakyReLU(0.2, inplace=True)
    if act_fun == 'Swish':
        return Swish()
    if act_fun == 'ELU':
        return nn.ELU()
    if act_fun == 'none':
        return nn.Sequential()
    assert False
def bn(num_features):
    """Shorthand for a 2D batch-normalization layer."""
    return nn.BatchNorm2d(num_features)
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
    """'Same'-padded convolution wrapped in an nn.Sequential.

    pad: 'zero' (built-in Conv2d padding) or 'reflection' (explicit pad
        layer in front, conv padding 0).
    downsample_mode: with stride > 1, 'stride' keeps a strided conv while
        'avg' / 'max' / 'lanczos2' / 'lanczos3' append a pooling or
        Downsampler stage after a stride-1 conv.
    """
    downsampler = None
    if stride != 1 and downsample_mode != 'stride':
        if downsample_mode == 'avg':
            downsampler = nn.AvgPool2d(stride, stride)
        elif downsample_mode == 'max':
            downsampler = nn.MaxPool2d(stride, stride)
        elif downsample_mode in ['lanczos2', 'lanczos3']:
            downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
        else:
            assert False
        # The conv itself stays stride-1; the appended stage downsamples.
        stride = 1
    to_pad = (kernel_size - 1) // 2
    padder = None
    if pad == 'reflection':
        padder = nn.ReflectionPad2d(to_pad)
        to_pad = 0
    convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
    modules = [m for m in (padder, convolver, downsampler) if m is not None]
    return nn.Sequential(*modules)
DeepIR | DeepIR-main/modules/dmodels/unet.py | import torch.nn as nn
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import *
class ListModule(nn.Module):
    """Minimal list-like container of sub-modules (predates nn.ModuleList):
    supports indexing (including negative indices), iteration and len()."""

    def __init__(self, *args):
        super(ListModule, self).__init__()
        for position, module in enumerate(args):
            self.add_module(str(position), module)

    def __getitem__(self, idx):
        if idx >= len(self._modules):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx = len(self) + idx
        # Walk the ordered module dict `idx` steps, then take the next one.
        values = iter(self._modules.values())
        for _ in range(idx):
            next(values)
        return next(values)

    def __iter__(self):
        return iter(self._modules.values())

    def __len__(self):
        return len(self._modules)
class UNet(nn.Module):
    '''
    Standard four-stage U-Net with optional extra depth and input
    re-injection.

    upsample_mode in ['deconv', 'nearest', 'bilinear']
    pad in ['zero', 'replication', 'none']
    more_layers: extra down/up stage pairs beyond the standard four.
    concat_x: if True, a matching-scale AvgPool'd copy of the input is
        concatenated onto every encoder feature map.
    '''
    def __init__(self, num_input_channels=3, num_output_channels=3,
                 feature_scale=4, more_layers=0, concat_x=False,
                 upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d, need_sigmoid=True, need_bias=True):
        super(UNet, self).__init__()
        self.feature_scale = feature_scale
        self.more_layers = more_layers
        self.concat_x = concat_x
        filters = [64, 128, 256, 512, 1024]
        filters = [x // self.feature_scale for x in filters]
        # With concat_x, each stage outputs fewer channels so that the
        # concatenated input planes restore the nominal width.
        self.start = unetConv2(num_input_channels, filters[0] if not concat_x else filters[0] - num_input_channels, norm_layer, need_bias, pad)
        self.down1 = unetDown(filters[0], filters[1] if not concat_x else filters[1] - num_input_channels, norm_layer, need_bias, pad)
        self.down2 = unetDown(filters[1], filters[2] if not concat_x else filters[2] - num_input_channels, norm_layer, need_bias, pad)
        self.down3 = unetDown(filters[2], filters[3] if not concat_x else filters[3] - num_input_channels, norm_layer, need_bias, pad)
        self.down4 = unetDown(filters[3], filters[4] if not concat_x else filters[4] - num_input_channels, norm_layer, need_bias, pad)
        # more downsampling layers
        if self.more_layers > 0:
            self.more_downs = [
                unetDown(filters[4], filters[4] if not concat_x else filters[4] - num_input_channels, norm_layer, need_bias, pad) for i in range(self.more_layers)]
            self.more_ups = [unetUp(filters[4], upsample_mode, need_bias, pad, same_num_filt=True) for i in range(self.more_layers)]
            self.more_downs = ListModule(*self.more_downs)
            self.more_ups = ListModule(*self.more_ups)
        self.up4 = unetUp(filters[3], upsample_mode, need_bias, pad)
        self.up3 = unetUp(filters[2], upsample_mode, need_bias, pad)
        self.up2 = unetUp(filters[1], upsample_mode, need_bias, pad)
        self.up1 = unetUp(filters[0], upsample_mode, need_bias, pad)
        self.final = conv(filters[0], num_output_channels, 1, bias=need_bias, pad=pad)
        if need_sigmoid:
            self.final = nn.Sequential(self.final, nn.Sigmoid())

    def forward(self, inputs):
        # Pyramid of downsampled inputs, used only when concat_x is on.
        downs = [inputs]
        down = nn.AvgPool2d(2, 2)
        for i in range(4 + self.more_layers):
            downs.append(down(downs[-1]))
        in64 = self.start(inputs)
        if self.concat_x:
            in64 = torch.cat([in64, downs[0]], 1)
        down1 = self.down1(in64)
        if self.concat_x:
            down1 = torch.cat([down1, downs[1]], 1)
        down2 = self.down2(down1)
        if self.concat_x:
            down2 = torch.cat([down2, downs[2]], 1)
        down3 = self.down3(down2)
        if self.concat_x:
            down3 = torch.cat([down3, downs[3]], 1)
        down4 = self.down4(down3)
        if self.concat_x:
            down4 = torch.cat([down4, downs[4]], 1)
        if self.more_layers > 0:
            prevs = [down4]
            for kk, d in enumerate(self.more_downs):
                out = d(prevs[-1])
                if self.concat_x:
                    out = torch.cat([out, downs[kk + 5]], 1)
                prevs.append(out)
            up_ = self.more_ups[-1](prevs[-1], prevs[-2])
            for idx in range(self.more_layers - 1):
                # FIX(review): the original indexed with `self.more`, an
                # attribute that is never defined anywhere in this class
                # (AttributeError whenever more_layers > 1); the intended
                # attribute is self.more_layers.
                l = self.more_ups[self.more_layers - idx - 2]
                up_ = l(up_, prevs[self.more_layers - idx - 2])
        else:
            up_ = down4
        up4 = self.up4(up_, down3)
        up3 = self.up3(up4, down2)
        up2 = self.up2(up3, down1)
        up1 = self.up1(up2, in64)
        return self.final(up1)
class unetConv2(nn.Module):
    """U-Net double-conv block: (conv -> [norm] -> ReLU) twice; the norm
    stage is skipped when norm_layer is None."""

    def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
        super(unetConv2, self).__init__()
        # FIX(review): removed a leftover debug `print(pad)` that spammed
        # stdout on every block construction.
        if norm_layer is not None:
            self.conv1 = nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
                                       norm_layer(out_size),
                                       nn.ReLU(),)
            self.conv2 = nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
                                       norm_layer(out_size),
                                       nn.ReLU(),)
        else:
            self.conv1 = nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
                                       nn.ReLU(),)
            self.conv2 = nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
                                       nn.ReLU(),)

    def forward(self, inputs):
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        return outputs
class unetDown(nn.Module):
    """U-Net encoder stage: 2x2 max-pool followed by a double-conv block."""

    def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
        super(unetDown, self).__init__()
        self.conv = unetConv2(in_size, out_size, norm_layer, need_bias, pad)
        self.down = nn.MaxPool2d(2, 2)

    def forward(self, inputs):
        pooled = self.down(inputs)
        return self.conv(pooled)
class unetUp(nn.Module):
    """U-Net decoder stage: upsample the deeper features, center-crop the
    encoder skip features to match, concatenate and double-convolve."""
    def __init__(self, out_size, upsample_mode, need_bias, pad, same_num_filt=False):
        super(unetUp, self).__init__()
        # same_num_filt: the incoming features already have out_size
        # channels (used by UNet's extra more_layers stages); otherwise
        # they carry twice as many.
        num_filt = out_size if same_num_filt else out_size * 2
        if upsample_mode == 'deconv':
            self.up = nn.ConvTranspose2d(num_filt, out_size, 4, stride=2, padding=1)
            self.conv = unetConv2(out_size * 2, out_size, None, need_bias, pad)
        elif upsample_mode == 'bilinear' or upsample_mode == 'nearest':
            self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode=upsample_mode),
                                    conv(num_filt, out_size, 3, bias=need_bias, pad=pad))
            self.conv = unetConv2(out_size * 2, out_size, None, need_bias, pad)
        else:
            assert False

    def forward(self, inputs1, inputs2):
        in1_up = self.up(inputs1)
        if (inputs2.size(2) != in1_up.size(2)) or (inputs2.size(3) != in1_up.size(3)):
            # Symmetric center-crop of the skip features to the upsampled size.
            diff2 = (inputs2.size(2) - in1_up.size(2)) // 2
            diff3 = (inputs2.size(3) - in1_up.size(3)) // 2
            inputs2_ = inputs2[:, :, diff2: diff2 + in1_up.size(2), diff3: diff3 + in1_up.size(3)]
        else:
            inputs2_ = inputs2
        output = self.conv(torch.cat([in1_up, inputs2_], 1))
        return output
| 7,324 | 36.953368 | 164 | py |
DeepIR | DeepIR-main/modules/dmodels/__init__.py | from .skip import skip
from .texture_nets import get_texture_nets
from .resnet import ResNet
from .unet import UNet
import torch.nn as nn
def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3, act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5, downsample_mode='stride'):
    """Construct one of the deep-image-prior style generator networks.

    Args:
        input_depth: number of input (noise) channels.
        NET_TYPE: one of 'ResNet' | 'skip' | 'texture_nets' | 'UNet' | 'identity'.
        pad: padding mode forwarded to the network builders.
        upsample_mode: upsampling strategy for decoder stages.
        n_channels: number of output image channels ('skip' only; the other
            builders hard-code 3 outputs).
        act_fun: activation used by the 'skip' architecture.
        skip_n33d, skip_n33u, skip_n11: per-scale channel counts for 'skip';
            an int applies the same count to every scale, a list sets them
            individually.
        num_scales: number of resolution scales in the 'skip' network.
        downsample_mode: downsampling strategy for encoder stages.
    Returns:
        an nn.Module implementing the requested architecture.
    """
    if NET_TYPE == 'ResNet':
        # TODO: ResNet hyper-parameters are currently hard-coded.
        net = ResNet(input_depth, 3, 10, 16, 1, nn.BatchNorm2d, False)
    elif NET_TYPE == 'skip':
        net = skip(input_depth, n_channels, num_channels_down = [skip_n33d]*num_scales if isinstance(skip_n33d, int) else skip_n33d,
                                            num_channels_up =   [skip_n33u]*num_scales if isinstance(skip_n33u, int) else skip_n33u,
                                            num_channels_skip = [skip_n11]*num_scales if isinstance(skip_n11, int) else skip_n11,
                                            upsample_mode=upsample_mode, downsample_mode=downsample_mode,
                                            need_sigmoid=True, need_bias=True, pad=pad, act_fun=act_fun)
    elif NET_TYPE == 'texture_nets':
        net = get_texture_nets(inp=input_depth, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False,pad=pad)
    elif NET_TYPE =='UNet':
        net = UNet(num_input_channels=input_depth, num_output_channels=3,
                   feature_scale=4, more_layers=0, concat_x=False,
                   upsample_mode=upsample_mode, pad=pad, norm_layer=nn.BatchNorm2d, need_sigmoid=True, need_bias=True)
    elif NET_TYPE == 'identity':
        # Pass-through model: input must already be a 3-channel image.
        assert input_depth == 3
        net = nn.Sequential()
    else:
        assert False
    return net
rude-carnie | rude-carnie-master/export.py | import tensorflow as tf
from model import select_model, get_checkpoint
from utils import RESIZE_AOI, RESIZE_FINAL
from tensorflow.python.framework import graph_util
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
import os
GENDER_LIST =['M','F']
AGE_LIST = ['(0, 2)','(4, 6)','(8, 12)','(15, 20)','(25, 32)','(38, 43)','(48, 53)','(60, 100)']
tf.app.flags.DEFINE_string('checkpoint', 'checkpoint',
'Checkpoint basename')
tf.app.flags.DEFINE_string('class_type', 'age',
'Classification type (age|gender)')
tf.app.flags.DEFINE_string('model_dir', '',
'Model directory (where training data lives)')
tf.app.flags.DEFINE_integer('model_version', 1,
"""Version number of the model.""")
tf.app.flags.DEFINE_string('output_dir', '/tmp/tf_exported_model/0',
'Export directory')
tf.app.flags.DEFINE_string('model_type', 'default',
'Type of convnet')
tf.app.flags.DEFINE_string('requested_step', '', 'Within the model directory, a requested step to restore e.g., 9000')
FLAGS = tf.app.flags.FLAGS
def preproc_jpeg(image_buffer):
    """Decode serialized JPEG bytes and preprocess them for the classifier.

    Decodes to RGB, resizes to the area-of-interest size, then down to the
    network input size, and standardizes pixel values per image.
    """
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    crop = tf.image.resize_images(image, (RESIZE_AOI, RESIZE_AOI))
    # Two-step resize (AOI then final) — presumably mirrors the training-time
    # pipeline; a direct resize to RESIZE_FINAL would give slightly different
    # pixels. TODO(review): confirm the double resize is intentional.
    crop = tf.image.resize_images(crop, (RESIZE_FINAL, RESIZE_FINAL))
    image_out = tf.image.per_image_standardization(crop)
    return image_out
def main(argv=None):
    """Export a trained age/gender checkpoint as a TF SavedModel for serving.

    Builds a serving graph that parses serialized tf.Examples, decodes and
    preprocesses the embedded JPEGs, runs the selected model, restores the
    checkpoint from FLAGS.model_dir, and writes a SavedModel with both a
    classification and a prediction signature to
    FLAGS.output_dir/FLAGS.model_version.
    """
    with tf.Graph().as_default():
        # Serving input: a batch of serialized tf.Example protos.
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preproc_jpeg, jpegs, dtype=tf.float32)
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)
        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:
            model_fn = select_model(FLAGS.model_type)
            # Inference mode: keep probability 1, is_training False.
            logits = model_fn(nlabels, images, 1, False)
            softmax_output = tf.nn.softmax(logits)
            # Age returns the top-2 buckets; gender only needs the top-1.
            values, indices = tf.nn.top_k(softmax_output, 2 if FLAGS.class_type == 'age' else 1)
            class_tensor = tf.constant(label_list)
            table = tf.contrib.lookup.index_to_string_table_from_tensor(class_tensor)
            classes = table.lookup(tf.to_int64(indices))
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
            checkpoint_path = '%s' % (FLAGS.model_dir)
            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
            print('Restored model checkpoint %s' % model_checkpoint_path)
            output_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(str(FLAGS.model_version)))
            print('Exporting trained model to %s' % output_path)
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)
            # Build the signature_def_map.
            classify_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                serialized_tf_example)
            classes_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                classes)
            scores_output_tensor_info = tf.saved_model.utils.build_tensor_info(values)
            classification_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={
                        tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                            classify_inputs_tensor_info
                    },
                    outputs={
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                            classes_output_tensor_info,
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                            scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    CLASSIFY_METHOD_NAME))
            # Prediction signature takes raw JPEG bytes directly.
            predict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(jpegs)
            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={'images': predict_inputs_tensor_info},
                    outputs={
                        'classes': classes_output_tensor_info,
                        'scores': scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
                ))
            # Lookup tables must be initialized when the SavedModel is loaded.
            legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict_images':
                        prediction_signature,
                    tf.saved_model.signature_constants.
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        classification_signature,
                },
                legacy_init_op=legacy_init_op)
            builder.save()
            print('Successfully exported model to %s' % FLAGS.output_dir)
if __name__ == '__main__':
tf.app.run()
| 5,970 | 43.559701 | 118 | py |
rude-carnie | rude-carnie-master/detect.py | import numpy as np
import cv2
FACE_PAD = 50
class ObjectDetector(object):
    """Abstract interface for detectors; concrete subclasses implement run().

    In the subclasses below, run() returns a tuple of (list of cropped face
    image paths, path to an annotated copy of the input image).
    """
    def __init__(self):
        pass
    def run(self, image_file):
        # No-op in the base class; overridden by concrete detectors.
        pass
# OpenCV's cascade object detector
class ObjectDetectorCascadeOpenCV(ObjectDetector):
    """Face detector backed by an OpenCV Haar-cascade classifier.

    Detected faces are cropped with a FACE_PAD margin, written to tgtdir as
    '<basename>-N.jpg', and an annotated copy of the whole image is saved as
    '<basename>.jpg'.
    """
    def __init__(self, model_name, basename='frontal-face', tgtdir='.', min_height_dec=20, min_width_dec=20,
                 min_height_thresh=50, min_width_thresh=50):
        # min_*_dec: divide the image dimension by this to get a relative
        # minimum face size; min_*_thresh: absolute lower bound in pixels.
        self.min_height_dec = min_height_dec
        self.min_width_dec = min_width_dec
        self.min_height_thresh = min_height_thresh
        self.min_width_thresh = min_width_thresh
        self.tgtdir = tgtdir
        self.basename = basename
        self.face_cascade = cv2.CascadeClassifier(model_name)
    def run(self, image_file):
        """Detect faces in image_file; returns (cropped paths, annotated path)."""
        print(image_file)
        img = cv2.imread(image_file)
        min_h = int(max(img.shape[0] / self.min_height_dec, self.min_height_thresh))
        min_w = int(max(img.shape[1] / self.min_width_dec, self.min_width_thresh))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # NOTE(review): minSize is passed as (min_h, min_w) but OpenCV's
        # detectMultiScale documents minSize as (width, height) — confirm
        # the intended order for non-square inputs.
        faces = self.face_cascade.detectMultiScale(gray, 1.3, minNeighbors=5, minSize=(min_h, min_w))
        images = []
        for i, (x, y, w, h) in enumerate(faces):
            images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))
        print('%d faces detected' % len(images))
        for (x, y, w, h) in faces:
            self.draw_rect(img, x, y, w, h)
        # Fix in case nothing found in the image
        outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
        cv2.imwrite(outfile, img)
        return images, outfile
    def sub_image(self, name, img, x, y, w, h):
        """Write the padded face region to its own image file; returns its path."""
        # Pad the detection box by FACE_PAD, clamped to the image bounds.
        upper_cut = [min(img.shape[0], y + h + FACE_PAD), min(img.shape[1], x + w + FACE_PAD)]
        lower_cut = [max(y - FACE_PAD, 0), max(x - FACE_PAD, 0)]
        roi_color = img[lower_cut[0]:upper_cut[0], lower_cut[1]:upper_cut[1]]
        cv2.imwrite(name, roi_color)
        return name
    def draw_rect(self, img, x, y, w, h):
        """Draw the padded bounding box on img in-place (blue, 2px)."""
        upper_cut = [min(img.shape[0], y + h + FACE_PAD), min(img.shape[1], x + w + FACE_PAD)]
        lower_cut = [max(y - FACE_PAD, 0), max(x - FACE_PAD, 0)]
        cv2.rectangle(img, (lower_cut[1], lower_cut[0]), (upper_cut[1], upper_cut[0]), (255, 0, 0), 2)
| 2,287 | 39.140351 | 112 | py |
rude-carnie | rude-carnie-master/yolodetect.py | from detect import ObjectDetector
import numpy as np
import tensorflow as tf
import cv2
class YOLOBase(ObjectDetector):
    """Shared graph construction and post-processing for the YOLO detectors.

    Subclasses append their own final FC layer (load_model) and decode the
    network output (interpret_output) into [x, y, w, h, prob] boxes, where
    (x, y) is the box center in image coordinates.
    """
    # NOTE(review): this no-arg __init__ is shadowed by the parameterized
    # __init__ defined at the bottom of the class body and is dead code.
    def __init__(self):
        pass
    def _conv_layer(self, idx, inputs, filters, size, stride):
        """Conv layer with explicit symmetric padding and leaky-ReLU output."""
        channels = inputs.get_shape()[3]
        weight = tf.Variable(tf.truncated_normal([size, size, int(channels), filters], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[filters]))
        pad_size = size // 2
        pad_mat = np.array([[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])
        inputs_pad = tf.pad(inputs, pad_mat)
        conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',
                            name=str(idx) + '_conv')
        conv_biased = tf.add(conv, biases, name=str(idx) + '_conv_biased')
        # Leaky ReLU: max(alpha * x, x).
        return tf.maximum(self.alpha * conv_biased, conv_biased, name=str(idx) + '_leaky_relu')
    def _pooling_layer(self, idx, inputs, size, stride):
        """Max-pooling layer with SAME padding."""
        return tf.nn.max_pool(inputs, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding='SAME',
                              name=str(idx) + '_pool')
    def _fc_layer(self, idx, inputs, hiddens, flat=False, linear=False):
        """Fully connected layer; flat=True flattens a conv feature map first,
        linear=True skips the leaky-ReLU activation."""
        input_shape = inputs.get_shape().as_list()
        if flat:
            dim = input_shape[1] * input_shape[2] * input_shape[3]
            # NHWC -> NCHW before flattening (matches the original weights).
            inputs_transposed = tf.transpose(inputs, (0, 3, 1, 2))
            inputs_processed = tf.reshape(inputs_transposed, [-1, dim])
        else:
            dim = input_shape[1]
            inputs_processed = inputs
        weight = tf.Variable(tf.truncated_normal([dim, hiddens], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
        if linear: return tf.add(tf.matmul(inputs_processed, weight), biases, name=str(idx) + '_fc')
        ip = tf.add(tf.matmul(inputs_processed, weight), biases)
        return tf.maximum(self.alpha * ip, ip, name=str(idx) + '_fc')
    def _init_base_model(self):
        """Build the shared tiny-YOLO trunk up to fc_17 for 448x448 inputs."""
        self.x = tf.placeholder('float32', [None, 448, 448, 3])
        conv_1 = self._conv_layer(1, self.x, 16, 3, 1)
        pool_2 = self._pooling_layer(2, conv_1, 2, 2)
        conv_3 = self._conv_layer(3, pool_2, 32, 3, 1)
        pool_4 = self._pooling_layer(4, conv_3, 2, 2)
        conv_5 = self._conv_layer(5, pool_4, 64, 3, 1)
        pool_6 = self._pooling_layer(6, conv_5, 2, 2)
        conv_7 = self._conv_layer(7, pool_6, 128, 3, 1)
        pool_8 = self._pooling_layer(8, conv_7, 2, 2)
        conv_9 = self._conv_layer(9, pool_8, 256, 3, 1)
        pool_10 = self._pooling_layer(10, conv_9, 2, 2)
        conv_11 = self._conv_layer(11, pool_10, 512, 3, 1)
        pool_12 = self._pooling_layer(12, conv_11, 2, 2)
        conv_13 = self._conv_layer(13, pool_12, 1024, 3, 1)
        conv_14 = self._conv_layer(14, conv_13, 1024, 3, 1)
        conv_15 = self._conv_layer(15, conv_14, 1024, 3, 1)
        fc_16 = self._fc_layer(16, conv_15, 256, flat=True, linear=False)
        return self._fc_layer(17, fc_16, 4096, flat=False, linear=False)
    def _iou(self, box1, box2):
        """Intersection-over-union of two center-format [x, y, w, h] boxes."""
        tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2],
                                                                         box2[0] - 0.5 * box2[2])
        lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3],
                                                                         box2[1] - 0.5 * box2[3])
        if tb < 0 or lr < 0:
            intersection = 0
        else:
            intersection = tb * lr
        return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
    def sub_image(self, name, img, x, y, w, h):
        """Write the box (centered at x, y) to its own image file; returns path."""
        half_w = w // 2
        half_h = h // 2
        upper_cut = [y + half_h, x + half_w]
        lower_cut = [y - half_h, x - half_w];
        roi_color = img[lower_cut[0]:upper_cut[0], lower_cut[1]:upper_cut[1]]
        cv2.imwrite(name, roi_color)
        return name
    def draw_rect(self, img, x, y, w, h):
        """Draw the center-format box on img in-place (green, 2px)."""
        half_w = w // 2
        half_h = h // 2
        upper_cut = [y + half_h, x + half_w]
        lower_cut = [y - half_h, x - half_w];
        cv2.rectangle(img, (lower_cut[1], lower_cut[0]), (upper_cut[1], upper_cut[0]), (0, 255, 0), 2)
    def run(self, filename):
        """Run detection on an image file; returns (cropped paths, annotated path)."""
        img = cv2.imread(filename)
        self.h_img, self.w_img, _ = img.shape
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray(img_RGB)
        inputs = np.zeros((1, 448, 448, 3), dtype='float32')
        # Scale pixels to [-1, 1] as expected by the network.
        inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_19, feed_dict=in_dict)
        faces = self.interpret_output(net_output[0])
        images = []
        for i, (x, y, w, h, p) in enumerate(faces):
            images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))
        print('%d faces detected' % len(images))
        for (x, y, w, h, p) in faces:
            print('Face found [%d, %d, %d, %d] (%.2f)' % (x, y, w, h, p));
            self.draw_rect(img, x, y, w, h)
        # Fix in case nothing found in the image
        outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
        cv2.imwrite(outfile, img)
        return images, outfile
    def __init__(self, model_name, basename, tgtdir, alpha, threshold, iou_threshold):
        # alpha: leaky-ReLU slope; threshold: minimum class-confidence score;
        # iou_threshold: overlap above which the lower-scored box is dropped.
        self.alpha = alpha
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.basename = basename
        self.tgtdir = tgtdir
        self.load_model(model_name)
class PersonDetectorYOLOTiny(YOLOBase):
    """Tiny-YOLO (VOC, 7x7 grid, 20 classes) restricted to the 'person' class."""
    def __init__(self, model_name, basename='frontal-face', tgtdir='.', alpha=0.1, threshold=0.2, iou_threshold=0.5):
        self.alpha = alpha
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.basename = basename
        self.tgtdir = tgtdir
        self.load_model(model_name)
    def load_model(self, model_name):
        """Build the graph (trunk + 1470-way output) and restore the checkpoint."""
        g = tf.Graph()
        with g.as_default():
            fc_17 = self._init_base_model()
            # skip dropout_18
            self.fc_19 = self._fc_layer(19, fc_17, 1470, flat=False, linear=True)
            self.sess = tf.Session(graph=g)
            self.sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver()
            self.saver.restore(self.sess, model_name)
    def interpret_output(self, output):
        """Decode the 1470-float output into [x, y, w, h, prob] person boxes.

        Layout: 980 class probs (7x7x20), 98 box-confidence scales (7x7x2),
        392 box params (7x7x2x4, cell-relative with sqrt-encoded sizes).
        """
        probs = np.zeros((7, 7, 2, 20))
        class_probs = np.reshape(output[0:980], (7, 7, 20))
        scales = np.reshape(output[980:1078], (7, 7, 2))
        boxes = np.reshape(output[1078:], (7, 7, 2, 4))
        # Convert cell-relative centers to grid coordinates, then to pixels.
        offset = np.transpose(np.reshape(np.array([np.arange(7)] * 14), (2, 7, 7)), (1, 2, 0))
        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, 0:2] = boxes[:, :, :, 0:2] / 7.0
        # Sizes are predicted as square roots; square them back.
        boxes[:, :, :, 2] = np.multiply(boxes[:, :, :, 2], boxes[:, :, :, 2])
        boxes[:, :, :, 3] = np.multiply(boxes[:, :, :, 3], boxes[:, :, :, 3])
        boxes[:, :, :, 0] *= self.w_img
        boxes[:, :, :, 1] *= self.h_img
        boxes[:, :, :, 2] *= self.w_img
        boxes[:, :, :, 3] *= self.h_img
        for i in range(2):
            for j in range(20):
                probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[
            filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]
        # Non-maximum suppression: zero out lower-scored overlapping boxes.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0:
                continue
            for j in range(i + 1, len(boxes_filtered)):
                if self._iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0
        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]
        result = []
        for i in range(len(boxes_filtered)):
            # Class index 14 is 'person' in the VOC class ordering.
            if classes_num_filtered[i] == 14:
                result.append([int(boxes_filtered[i][0]),
                               int(boxes_filtered[i][1]),
                               int(boxes_filtered[i][2]),
                               int(boxes_filtered[i][3]),
                               probs_filtered[i]])
        return result
# This model doesnt seem to work particularly well on data I have tried
class FaceDetectorYOLO(YOLOBase):
    """Single-class YOLO face detector on an 11x11 grid with 2 boxes per cell."""
    def __init__(self, model_name, basename='frontal-face', tgtdir='.', alpha=0.1, threshold=0.2, iou_threshold=0.5):
        self.alpha = alpha
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.basename = basename
        self.tgtdir = tgtdir
        self.load_model(model_name)
    def load_model(self, model_name):
        """Build the graph (trunk + 1331-way output) and restore the checkpoint."""
        g = tf.Graph()
        with g.as_default():
            fc_17 = self._init_base_model()
            # skip dropout_18
            self.fc_19 = self._fc_layer(19, fc_17, 1331, flat=False, linear=True)
            self.sess = tf.Session(graph=g)
            self.sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver()
            self.saver.restore(self.sess, model_name)
    def interpret_output(self, output):
        """Decode the 1331-float output into [x, y, w, h, prob] face boxes.

        Layout: 121 class probs (11x11x1), 242 box-confidence scales
        (11x11x2), 968 box params (11x11x2x4, cell-relative with
        sqrt-encoded sizes).
        """
        prob_range = [0, 11 * 11 * 1]
        scales_range = [prob_range[1], prob_range[1] + 11 * 11 * 2]
        boxes_range = [scales_range[1], scales_range[1] + 11 * 11 * 2 * 4]
        probs = np.zeros((11, 11, 2, 1))
        class_probs = np.reshape(output[0:prob_range[1]], (11, 11, 1))
        scales = np.reshape(output[scales_range[0]:scales_range[1]], (11, 11, 2))
        boxes = np.reshape(output[boxes_range[0]:], (11, 11, 2, 4))
        # Convert cell-relative centers to grid coordinates, then to pixels.
        offset = np.transpose(np.reshape(np.array([np.arange(11)] * (2 * 11)), (2, 11, 11)), (1, 2, 0))
        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, 0:2] = boxes[:, :, :, 0:2] / float(11)
        # Sizes are predicted as square roots; square them back.
        boxes[:, :, :, 2] = np.multiply(boxes[:, :, :, 2], boxes[:, :, :, 2])
        boxes[:, :, :, 3] = np.multiply(boxes[:, :, :, 3], boxes[:, :, :, 3])
        boxes[:, :, :, 0] *= self.w_img
        boxes[:, :, :, 1] *= self.h_img
        boxes[:, :, :, 2] *= self.w_img
        boxes[:, :, :, 3] *= self.h_img
        for i in range(2):
            probs[:, :, i, 0] = np.multiply(class_probs[:, :, 0], scales[:, :, i])
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[
            filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]
        # Non-maximum suppression: zero out lower-scored overlapping boxes.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0: continue
            for j in range(i + 1, len(boxes_filtered)):
                if self._iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0
        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]
        result = []
        for i in range(len(boxes_filtered)):
            result.append([int(boxes_filtered[i][0]),
                           int(boxes_filtered[i][1]),
                           int(boxes_filtered[i][2]),
                           int(boxes_filtered[i][3]),
                           probs_filtered[i]])
        return result
| 12,603 | 43.380282 | 117 | py |
rude-carnie | rude-carnie-master/filter_by_face.py | import numpy as np
import tensorflow as tf
import os
import cv2
import time
import sys
from utils import *
import csv
# YOLO tiny
#python fd.py --filename /media/dpressel/xdata/insights/converted/ --face_detection_model weights/YOLO_tiny.ckpt --face_detection_type yolo_tiny --target yolo.csv
# CV2
#python fd.py --filename /media/dpressel/xdata/insights/converted/ --face_detection_model /usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml --target cascade.csv
tf.app.flags.DEFINE_string('filename', '',
'File (Image) or File list (Text/No header TSV) to process')
tf.app.flags.DEFINE_string('face_detection_model', '', 'Do frontal face detection with model specified')
tf.app.flags.DEFINE_string('face_detection_type', 'cascade', 'Face detection model type (yolo_tiny|cascade)')
tf.app.flags.DEFINE_string('target', None, 'Target file name (defaults to {face_detection_model}.csv')
FACE_PAD = 0
FLAGS = tf.app.flags.FLAGS
def list_images(srcfile):
    """Read image paths (first column) from a CSV/TSV/TXT listing file.

    The delimiter is ',' for .csv files and tab otherwise; a single header
    row is skipped for .csv and .tsv files.

    Args:
        srcfile: path to the listing file.
    Returns:
        list of first-column values, one per data row (empty for an
        empty file).
    """
    with open(srcfile, 'r') as csvfile:
        delim = ',' if srcfile.endswith('.csv') else '\t'
        reader = csv.reader(csvfile, delimiter=delim)
        if srcfile.endswith('.csv') or srcfile.endswith('.tsv'):
            print('skipping header')
            # Bug fix: next(reader) raised StopIteration on an empty
            # .csv/.tsv; next(reader, None) makes the header skip a no-op.
            _ = next(reader, None)
        return [row[0] for row in reader]
def main(argv=None): # pylint: disable=unused-argument
    """Scan images with the configured face detector and write a CSV listing
    every file in which at least one face was found."""
    fd = face_detection_model(FLAGS.face_detection_type, FLAGS.face_detection_model)
    files = []
    contains_faces = []
    # Default the output name to '<detector_type>.csv' when not given.
    target = FLAGS.target = '%s.csv' % FLAGS.face_detection_type if FLAGS.target is None else FLAGS.target
    print('Creating output file %s' % target)
    output = open(target, 'w')
    writer = csv.writer(output)
    writer.writerow(('file_with_face',))
    if FLAGS.filename is not None:
        if os.path.isdir(FLAGS.filename):
            # Directory: collect every image file directly inside it.
            for relpath in os.listdir(FLAGS.filename):
                abspath = os.path.join(FLAGS.filename, relpath)
                if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                    print(abspath)
                    files.append(abspath)
        elif any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
            # Listing file: read the image paths from its first column.
            files = list_images(FLAGS.filename)
        else:
            # Single image file.
            files = [FLAGS.filename]
    for f in files:
        try:
            images, outfile = fd.run(f)
            if len(images):
                print(f, 'YES')
                writer.writerow((f,))
                contains_faces.append(f)
            else:
                print(f, 'NO')
        except Exception as e:
            # Best-effort: one unreadable image should not stop the scan.
            print(e)
if __name__=='__main__':
tf.app.run()
| 2,721 | 34.350649 | 177 | py |
rude-carnie | rude-carnie-master/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six.moves
from datetime import datetime
import sys
import math
import time
from data import inputs, standardize_image
import numpy as np
import tensorflow as tf
from detect import *
import re
RESIZE_AOI = 256
RESIZE_FINAL = 227
# Modifed from here
# http://stackoverflow.com/questions/3160699/python-progress-bar#3160819
class ProgressBar(object):
    """Simple in-place text progress bar printed to stdout.

    Modified from
    http://stackoverflow.com/questions/3160699/python-progress-bar#3160819
    """
    DEFAULT = 'Progress: %(bar)s %(percent)3d%%'
    FULL = '%(bar)s %(current)d/%(total)d (%(percent)3d%%) %(remaining)d to go'
    def __init__(self, total, width=40, fmt=DEFAULT, symbol='='):
        """total: number of steps; width: bar width in characters;
        fmt: DEFAULT/FULL or a compatible %-format; symbol: single fill char."""
        assert len(symbol) == 1
        self.total = total
        self.width = width
        self.symbol = symbol
        # Pad the %d fields to the width of `total` so the line length is
        # stable as the counters grow.
        self.fmt = re.sub(r'(?P<name>%\(.+?\))d',
                          r'\g<name>%dd' % len(str(total)), fmt)
        self.current = 0
    def update(self, step=1):
        """Advance by `step` and redraw the bar on the current line."""
        self.current += step
        percent = self.current / float(self.total)
        size = int(self.width * percent)
        remaining = self.total - self.current
        bar = '[' + self.symbol * size + ' ' * (self.width - size) + ']'
        args = {
            'total': self.total,
            'bar': bar,
            'current': self.current,
            'percent': percent * 100,
            'remaining': remaining
        }
        # Builtin print replaces the former six.print_ call: with the
        # module's `from __future__ import print_function` it behaves the
        # same on py2/py3, removing the third-party `six` dependency here.
        print('\r' + self.fmt % args, end='')
    def done(self):
        """Snap to 100%, redraw, and move to the next line."""
        self.current = self.total
        self.update(step=0)
        print('')
# Read image files
class ImageCoder(object):
    """TensorFlow-backed image codec: PNG->JPEG re-encoding and JPEG decoding
    with an immediate resize to the RESIZE_AOI area-of-interest size."""
    def __init__(self):
        # Create a single Session to run all image coding calls.
        config = tf.ConfigProto(allow_soft_placement=True)
        self._sess = tf.Session(config=config)
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
        # Decoded images are resized to RESIZE_AOI x RESIZE_AOI before use.
        self.crop = tf.image.resize_images(self._decode_jpeg, (RESIZE_AOI, RESIZE_AOI))
    def png_to_jpeg(self, image_data):
        """Re-encode raw PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})
    def decode_jpeg(self, image_data):
        """Decode JPEG bytes to an RGB array resized to RESIZE_AOI."""
        image = self._sess.run(self.crop, #self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def make_multi_image_batch(filenames, coder):
    """Process a multi-image batch, each with a single-look

    Args:
      filenames: list of paths
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      a stacked tensor of standardized RESIZE_FINAL images, one per filename.
    """
    images = []
    for filename in filenames:
        with tf.gfile.FastGFile(filename, 'rb') as f:
            image_data = f.read()
        # Convert any PNG to JPEG's for consistency.
        if _is_png(filename):
            print('Converting PNG to JPEG for %s' % filename)
            image_data = coder.png_to_jpeg(image_data)
        image = coder.decode_jpeg(image_data)
        # Resize to the network input size and normalize per image.
        crop = tf.image.resize_images(image, (RESIZE_FINAL, RESIZE_FINAL))
        image = standardize_image(crop)
        images.append(image)
    image_batch = tf.stack(images)
    return image_batch
def make_multi_crop_batch(filename, coder):
    """Process a single image file into a batch of augmented looks.

    Produces 12 looks: the full resized image and its mirror, plus five
    RESIZE_FINAL crops (four corners and center), each with its mirror.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      a stacked tensor of 12 standardized image crops.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Convert any PNG to JPEG's for consistency.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)
    image = coder.decode_jpeg(image_data)
    crops = []
    print('Running multi-cropped image')
    h = image.shape[0]
    w = image.shape[1]
    hl = h - RESIZE_FINAL
    wl = w - RESIZE_FINAL
    crop = tf.image.resize_images(image, (RESIZE_FINAL, RESIZE_FINAL))
    crops.append(standardize_image(crop))
    crops.append(standardize_image(tf.image.flip_left_right(crop)))
    corners = [ (0, 0), (0, wl), (hl, 0), (hl, wl), (int(hl/2), int(wl/2))]
    for corner in corners:
        ch, cw = corner
        cropped = tf.image.crop_to_bounding_box(image, ch, cw, RESIZE_FINAL, RESIZE_FINAL)
        crops.append(standardize_image(cropped))
        # Bug fix: the mirrored crop was standardized twice (once when
        # `flipped` was built and again when appended), re-normalizing
        # already-normalized pixels. Standardize exactly once, matching
        # how the mirrored full-image look is handled above.
        flipped = tf.image.flip_left_right(cropped)
        crops.append(standardize_image(flipped))
    image_batch = tf.stack(crops)
    return image_batch
def face_detection_model(model_type, model_path):
    """Instantiate a face detector by (case-insensitive) type name.

    Args:
        model_type: 'yolo_tiny' | 'yolo_face' | 'dlib' | anything else
            (falls back to the OpenCV cascade detector).
        model_path: path to the model weights/definition file.
    Returns:
        a detector instance exposing run(image_file).
    """
    model_type_lc = model_type.lower()
    if model_type_lc == 'yolo_tiny':
        from yolodetect import PersonDetectorYOLOTiny
        return PersonDetectorYOLOTiny(model_path)
    elif model_type_lc == 'yolo_face':
        from yolodetect import FaceDetectorYOLO
        return FaceDetectorYOLO(model_path)
    elif model_type_lc == 'dlib':
        # Bug fix: this branch compared the raw `model_type` instead of the
        # lower-cased value, so 'Dlib'/'DLIB' silently fell through to the
        # cascade detector while the YOLO types were matched case-insensitively.
        from dlibdetect import FaceDetectorDlib
        return FaceDetectorDlib(model_path)
    return ObjectDetectorCascadeOpenCV(model_path)
| 5,920 | 32.078212 | 90 | py |
rude-carnie | rude-carnie-master/model.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import os
import numpy as np
import tensorflow as tf
from data import distorted_inputs
import re
from tensorflow.contrib.layers import *
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
TOWER_NAME = 'tower'
def select_model(name):
    """Return the network-builder function for a model-type name.

    Names beginning with 'inception' select the fine-tuned Inception V3
    builder, 'bn' selects the batch-normalized Levi-Hassner network, and
    anything else falls back to the plain Levi-Hassner model.
    """
    if name.startswith('inception'):
        print('selected (fine-tuning) inception model')
        return inception_v3
    if name == 'bn':
        print('selected batch norm model')
        return levi_hassner_bn
    print('selected default model')
    return levi_hassner
def get_checkpoint(checkpoint_path, requested_step=None, basename='checkpoint'):
    """Resolve which checkpoint file to restore.

    Args:
        checkpoint_path: directory containing the checkpoints.
        requested_step: optional explicit step; when given, the path
            '<dir>/<basename>-<step>' is returned directly.
        basename: checkpoint file prefix.
    Returns:
        (model_checkpoint_path, global_step) tuple; exits the process when
        no checkpoint state can be found in the directory.
    """
    if requested_step is not None:
        model_checkpoint_path = '%s/%s-%s' % (checkpoint_path, basename, requested_step)
        # Bug fix: the old guard was `os.path.exists(...) is None`, which is
        # always False (exists() returns a bool), so the missing-file branch
        # never ran. TF v2-format checkpoints store `<prefix>.index` /
        # `.data-*` rather than the bare prefix, so only warn here instead
        # of exiting, preserving the effective original behavior.
        if not os.path.exists(model_checkpoint_path):
            print('No checkpoint file found at [%s]' % checkpoint_path)
        print(model_checkpoint_path)  # fixed: was printed twice
        return model_checkpoint_path, requested_step
    ckpt = tf.train.get_checkpoint_state(checkpoint_path)
    if ckpt and ckpt.model_checkpoint_path:
        # Restore checkpoint as described in top of this program
        print(ckpt.model_checkpoint_path)
        # The step number is the suffix after the last '-' in the filename.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        return ckpt.model_checkpoint_path, global_step
    else:
        print('No checkpoint file found at [%s]' % checkpoint_path)
        exit(-1)
def _activation_summary(x):
    """Attach histogram and sparsity TensorBoard summaries to activation x."""
    # Strip the multi-GPU tower prefix so all towers share summary names.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def inception_v3(nlabels, images, pkeep, is_training):
    """Inception V3 backbone with a fresh nlabels-way output layer.

    Args:
        nlabels: number of output classes.
        images: batch of preprocessed input images.
        pkeep: dropout keep probability applied before the output layer.
        is_training: controls batch-norm moving-statistics updates.
    Returns:
        unscaled logits tensor of shape [batch, nlabels].
    """
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev=0.1
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope("InceptionV3", "InceptionV3", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [tf.contrib.slim.conv2d, tf.contrib.slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [tf.contrib.slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                net, end_points = inception_v3_base(images, scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    # Global average pool over the spatial dimensions.
                    net = avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
                    net = tf.nn.dropout(net, pkeep, name='droplast')
                    net = flatten(net, scope="flatten")
    with tf.variable_scope('output') as scope:
        # Fresh classifier head on the 2048-d pooled Inception features.
        weights = tf.Variable(tf.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(net, weights), biases, name=scope.name)
        _activation_summary(output)
    return output
def levi_hassner_bn(nlabels, images, pkeep, is_training):
    """Levi & Hassner age/gender CNN with batch norm on the conv layers.

    Args:
        nlabels: number of output classes.
        images: batch of preprocessed input images.
        pkeep: dropout keep probability for the fully connected layers.
        is_training: controls batch-norm moving-statistics updates.
    Returns:
        unscaled logits tensor of shape [batch, nlabels].
    """
    batch_norm_params = {
        "is_training": is_training,
        "trainable": True,
        # Decay for the moving averages.
        "decay": 0.9997,
        # Epsilon to prevent 0s in variance.
        "epsilon": 0.001,
        # Collection containing the moving mean and moving variance.
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.0005
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope("LeviHassnerBN", "LeviHassnerBN", [images]) as scope:
        with tf.contrib.slim.arg_scope(
                [convolution2d, fully_connected],
                weights_regularizer=weights_regularizer,
                biases_initializer=tf.constant_initializer(1.),
                weights_initializer=tf.random_normal_initializer(stddev=0.005),
                trainable=True):
            with tf.contrib.slim.arg_scope(
                    [convolution2d],
                    weights_initializer=tf.random_normal_initializer(stddev=0.01),
                    normalizer_fn=batch_norm,
                    normalizer_params=batch_norm_params):
                # Three conv/pool stages (BN replaces the LRN of the paper).
                conv1 = convolution2d(images, 96, [7,7], [4, 4], padding='VALID', biases_initializer=tf.constant_initializer(0.), scope='conv1')
                pool1 = max_pool2d(conv1, 3, 2, padding='VALID', scope='pool1')
                conv2 = convolution2d(pool1, 256, [5, 5], [1, 1], padding='SAME', scope='conv2')
                pool2 = max_pool2d(conv2, 3, 2, padding='VALID', scope='pool2')
                conv3 = convolution2d(pool2, 384, [3, 3], [1, 1], padding='SAME', biases_initializer=tf.constant_initializer(0.), scope='conv3')
                pool3 = max_pool2d(conv3, 3, 2, padding='VALID', scope='pool3')
                # can use tf.contrib.layer.flatten
                flat = tf.reshape(pool3, [-1, 384*6*6], name='reshape')
                full1 = fully_connected(flat, 512, scope='full1')
                drop1 = tf.nn.dropout(full1, pkeep, name='drop1')
                full2 = fully_connected(drop1, 512, scope='full2')
                drop2 = tf.nn.dropout(full2, pkeep, name='drop2')
    with tf.variable_scope('output') as scope:
        weights = tf.Variable(tf.random_normal([512, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(drop2, weights), biases, name=scope.name)
    return output
def levi_hassner(nlabels, images, pkeep, is_training):
    """Levi-Hassner age/gender convnet (local-response-norm variant).

    Args:
        nlabels: int, number of output classes.
        images: 4-D float tensor of input images (expects 227x227 crops so
            that pool3 flattens to 384*6*6).
        pkeep: keep probability for dropout after each fully connected layer.
        is_training: unused in this variant; kept so all model functions
            share the same signature.
    Returns:
        2-D logits tensor of shape [batch, nlabels].
    """
    weight_decay = 0.0005
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope("LeviHassner", "LeviHassner", [images]) as scope:
        # Shared defaults for conv + FC layers; the nested arg_scope then
        # narrows the weight init for conv layers only.
        with tf.contrib.slim.arg_scope(
            [convolution2d, fully_connected],
            weights_regularizer=weights_regularizer,
            biases_initializer=tf.constant_initializer(1.),
            weights_initializer=tf.random_normal_initializer(stddev=0.005),
            trainable=True):
            with tf.contrib.slim.arg_scope(
                [convolution2d],
                weights_initializer=tf.random_normal_initializer(stddev=0.01)):
                conv1 = convolution2d(images, 96, [7,7], [4, 4], padding='VALID', biases_initializer=tf.constant_initializer(0.), scope='conv1')
                pool1 = max_pool2d(conv1, 3, 2, padding='VALID', scope='pool1')
                # LRN after the first two pooling stages (instead of batch norm).
                norm1 = tf.nn.local_response_normalization(pool1, 5, alpha=0.0001, beta=0.75, name='norm1')
                conv2 = convolution2d(norm1, 256, [5, 5], [1, 1], padding='SAME', scope='conv2')
                pool2 = max_pool2d(conv2, 3, 2, padding='VALID', scope='pool2')
                norm2 = tf.nn.local_response_normalization(pool2, 5, alpha=0.0001, beta=0.75, name='norm2')
                conv3 = convolution2d(norm2, 384, [3, 3], [1, 1], biases_initializer=tf.constant_initializer(0.), padding='SAME', scope='conv3')
                pool3 = max_pool2d(conv3, 3, 2, padding='VALID', scope='pool3')
                flat = tf.reshape(pool3, [-1, 384*6*6], name='reshape')
                full1 = fully_connected(flat, 512, scope='full1')
                drop1 = tf.nn.dropout(full1, pkeep, name='drop1')
                full2 = fully_connected(drop1, 512, scope='full2')
                drop2 = tf.nn.dropout(full2, pkeep, name='drop2')
        # Final linear classifier layer (plain Variables, no regularizer).
        with tf.variable_scope('output') as scope:
            weights = tf.Variable(tf.random_normal([512, nlabels], mean=0.0, stddev=0.01), name='weights')
            biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
            output = tf.add(tf.matmul(drop2, weights), biases, name=scope.name)
    return output
| 8,852 | 44.168367 | 144 | py |
rude-carnie | rude-carnie-master/data.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import numpy as np
import tensorflow as tf
from distutils.version import LooseVersion
VERSION_GTE_0_12_0 = LooseVersion(tf.__version__) >= LooseVersion('0.12.0')
# Name change in TF v 0.12.0
if VERSION_GTE_0_12_0:
standardize_image = tf.image.per_image_standardization
else:
standardize_image = tf.image.per_image_whitening
def data_files(data_dir, subset):
    """Returns a python list of all (sharded) data subset files.

    Args:
      data_dir: directory holding the sharded TFRecord files.
      subset: one of 'train' or 'validation'; selects the shard prefix.
    Returns:
      python list of all (sharded) data set files.
    Note:
      On any failure this calls exit(-1) rather than raising, so it is only
      suitable for top-level script use.
    """
    if subset not in ['train', 'validation']:
        print('Invalid subset!')
        exit(-1)
    # Shards are named '<subset>-NNNNN-of-NNNNN' by preproc.py.
    tf_record_pattern = os.path.join(data_dir, '%s-*' % subset)
    data_files = tf.gfile.Glob(tf_record_pattern)
    print(data_files)
    if not data_files:
        # NOTE(review): the format args look swapped (prints subset where the
        # message says "data dir") — confirm intended wording before changing.
        print('No files found for data dir %s at %s' % (subset, data_dir))
        exit(-1)
    return data_files
def decode_jpeg(image_buffer, scope=None):
    """Decode a JPEG string into one 3-D float image Tensor.
    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.
    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # tf.op_scope is the pre-1.0 TensorFlow scoping API (argument order
    # differs from the later tf.name_scope).
    with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)
        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1). The various
        # adjust_* ops all require this range for dtype float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
def distort_image(image, height, width):
    """Randomly crop, flip, and color-jitter one image for training.

    Args:
      image: 3-D float image tensor, values in [0, 1).
      height, width: spatial size of the random crop.
    Returns:
      3-D float tensor of shape [height, width, 3] with random distortions.
    """
    # Image processing for training the network. Note the many random
    # distortions applied to the image.
    distorted_image = tf.random_crop(image, [height, width, 3])
    #distorted_image = tf.image.resize_images(image, [height, width])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order their operation.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)
    return distorted_image
def _is_tensor(x):
    """Return True if *x* is a TensorFlow tensor or variable."""
    tensor_like = (tf.Tensor, tf.Variable)
    return isinstance(x, tensor_like)
def eval_image(image, height, width):
    """Deterministically resize `image` for evaluation (no augmentation)."""
    return tf.image.resize_images(image, [height, width])
def data_normalization(image):
    """Apply per-image standardization (see the module-level
    `standardize_image` alias, chosen by TF version)."""
    image = standardize_image(image)
    return image
def image_preprocessing(image_buffer, image_size, train, thread_id=0):
    """Decode and preprocess one image for evaluation or training.
    Args:
      image_buffer: JPEG encoded string Tensor
      image_size: int, square output size (crop size in training, resize
        target in evaluation)
      train: boolean, apply random distortions when True
      thread_id: integer indicating preprocessing thread (currently unused
        in the body; kept for interface compatibility)
    Returns:
      3-D float Tensor containing an appropriately scaled, standardized image
    """
    image = decode_jpeg(image_buffer)
    if train:
        image = distort_image(image, image_size, image_size)
    else:
        image = eval_image(image, image_size, image_size)
    # Per-image standardization in both modes.
    image = data_normalization(image)
    return image
def parse_example_proto(example_serialized):
    """Parse one serialized Example proto produced by preproc.py.

    Args:
      example_serialized: scalar string Tensor of a serialized Example.
    Returns:
      Tuple (encoded JPEG string Tensor, int32 label Tensor, filename Tensor).
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                               default_value=''),
        'image/height': tf.FixedLenFeature([1], dtype=tf.int64,
                                           default_value=-1),
        'image/width': tf.FixedLenFeature([1], dtype=tf.int64,
                                          default_value=-1),
    }
    features = tf.parse_single_example(example_serialized, feature_map)
    # Labels are stored as int64 in the record; downstream wants int32.
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    return features['image/encoded'], label, features['image/filename']
def batch_inputs(data_dir, batch_size, image_size, train, num_preprocess_threads=4,
                 num_readers=1, input_queue_memory_factor=16):
    """Build the queue-based input pipeline and return batched tensors.

    Args:
      data_dir: directory with sharded TFRecord files and md.json.
      batch_size: number of examples per batch.
      image_size: square crop/resize size fed to image_preprocessing.
      train: True for shuffled training input with distortions.
      num_preprocess_threads: preprocessing threads; must be a multiple of 4.
      num_readers: number of TFRecordReader instances; must be >= 1.
      input_queue_memory_factor: scales the shuffle-queue minimum size.
    Returns:
      Tuple (images [batch, size, size, 3], labels [batch], filenames).
    Raises:
      ValueError: on invalid num_preprocess_threads or num_readers.
    """
    with tf.name_scope('batch_processing'):
        if train:
            files = data_files(data_dir, 'train')
            filename_queue = tf.train.string_input_producer(files,
                                                            shuffle=True,
                                                            capacity=16)
        else:
            files = data_files(data_dir, 'validation')
            filename_queue = tf.train.string_input_producer(files,
                                                            shuffle=False,
                                                            capacity=1)
        if num_preprocess_threads % 4:
            # BUG FIX: the original passed the count as a second ValueError
            # argument with an unescaped '%', so the message never included it.
            raise ValueError('Please make num_preprocess_threads a multiple '
                             'of 4 (%d %% 4 != 0).' % num_preprocess_threads)
        if num_readers < 1:
            raise ValueError('Please make num_readers at least 1')
        # Approximate number of examples per shard.
        examples_per_shard = 1024
        # Size the random shuffle queue to balance between good global
        # mixing (more examples) and memory use (fewer examples).
        # 1 image uses 299*299*3*4 bytes = 1MB
        # The default input_queue_memory_factor is 16 implying a shuffling queue
        # size: examples_per_shard * 16 * 1MB = 17.6GB
        min_queue_examples = examples_per_shard * input_queue_memory_factor
        if train:
            examples_queue = tf.RandomShuffleQueue(
                capacity=min_queue_examples + 3 * batch_size,
                min_after_dequeue=min_queue_examples,
                dtypes=[tf.string])
        else:
            examples_queue = tf.FIFOQueue(
                capacity=examples_per_shard + 3 * batch_size,
                dtypes=[tf.string])
        # Create multiple readers to populate the queue of examples.
        if num_readers > 1:
            enqueue_ops = []
            for _ in range(num_readers):
                reader = tf.TFRecordReader()
                _, value = reader.read(filename_queue)
                enqueue_ops.append(examples_queue.enqueue([value]))
            tf.train.queue_runner.add_queue_runner(
                tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
            example_serialized = examples_queue.dequeue()
        else:
            # Single reader: feed straight from the filename queue.
            reader = tf.TFRecordReader()
            _, example_serialized = reader.read(filename_queue)
        images_labels_fnames = []
        for thread_id in range(num_preprocess_threads):
            # Parse a serialized Example proto to extract the image and metadata.
            image_buffer, label_index, fname = parse_example_proto(example_serialized)
            image = image_preprocessing(image_buffer, image_size, train, thread_id)
            images_labels_fnames.append([image, label_index, fname])
        images, label_index_batch, fnames = tf.train.batch_join(
            images_labels_fnames,
            batch_size=batch_size,
            capacity=2 * num_preprocess_threads * batch_size)
        images = tf.cast(images, tf.float32)
        images = tf.reshape(images, shape=[batch_size, image_size, image_size, 3])
        # Display the training images in the visualizer.
        tf.summary.image('images', images, 20)
        return images, tf.reshape(label_index_batch, [batch_size]), fnames
def inputs(data_dir, batch_size=128, image_size=227, train=False, num_preprocess_threads=4):
    """Evaluation/validation input pipeline; pins input ops to the CPU."""
    with tf.device('/cpu:0'):
        images, labels, filenames = batch_inputs(
            data_dir, batch_size, image_size, train,
            num_preprocess_threads=num_preprocess_threads,
            num_readers=1)
    return images, labels, filenames
def distorted_inputs(data_dir, batch_size=128, image_size=227, num_preprocess_threads=4):
    """Training input pipeline with random distortions (train=True)."""
    # Force all input processing onto CPU in order to reserve the GPU for
    # the forward inference and back-propagation.
    with tf.device('/cpu:0'):
        images, labels, filenames = batch_inputs(
            data_dir, batch_size, image_size, train=True,
            num_preprocess_threads=num_preprocess_threads,
            num_readers=1)
    return images, labels, filenames
| 8,764 | 36.139831 | 92 | py |
rude-carnie | rude-carnie-master/preproc.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
import json
RESIZE_HEIGHT = 256
RESIZE_WIDTH = 256
tf.app.flags.DEFINE_string('fold_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/train_val_txt_files_per_fold/test_fold_is_0',
'Fold directory')
tf.app.flags.DEFINE_string('data_dir', '/data/xdata/age-gender/aligned',
'Data directory')
tf.app.flags.DEFINE_string('output_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/test_fold_is_0',
'Output directory')
tf.app.flags.DEFINE_string('train_list', 'age_train.txt',
'Training list')
tf.app.flags.DEFINE_string('valid_list', 'age_val.txt',
'Test list')
tf.app.flags.DEFINE_integer('train_shards', 10,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('valid_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
  """Wrap an int (or list of ints) as an int64_list Feature proto."""
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
  """Wrapper for inserting bytes features into Example proto.
  Args:
    value: a byte string (e.g. an encoded JPEG buffer).
  Returns:
    tf.train.Feature wrapping `value` in a single-element bytes_list.
  """
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, height, width):
  """Build an Example proto for an example.
  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label: integer, identifier for the ground truth for the network
    height: integer, image height in pixels
    width: integer, image width in pixels
  Returns:
    Example proto
  """
  # Only the basename of the path is stored in the record.
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/class/label': _int64_feature(label),
      'image/filename': _bytes_feature(str.encode(os.path.basename(filename))),
      'image/encoded': _bytes_feature(image_buffer),
      'image/height': _int64_feature(height),
      'image/width': _int64_feature(width)
  }))
  return example
class ImageCoder(object):
  """Helper class that provides TensorFlow image coding utilities."""
  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()
    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_png(self._png_data, channels=3)
    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
    # Decode -> resize to the module-level RESIZE_HEIGHT x RESIZE_WIDTH ->
    # re-encode, so every stored image has a uniform size.
    cropped = tf.image.resize_images(self._decode_jpeg, [RESIZE_HEIGHT, RESIZE_WIDTH])
    cropped = tf.cast(cropped, tf.uint8)
    self._recoded = tf.image.encode_jpeg(cropped, format='rgb', quality=100)
  def png_to_jpeg(self, image_data):
    # Re-encode a PNG byte string as JPEG bytes.
    return self._sess.run(self._png_to_jpeg,
                          feed_dict={self._png_data: image_data})
  def resample_jpeg(self, image_data):
    # Decode JPEG bytes, resize, and return the re-encoded JPEG bytes.
    image = self._sess.run(self._recoded, #self._decode_jpeg,
                           feed_dict={self._decode_jpeg_data: image_data})
    return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
  """Process a single image file.
  Args:
    filename: string, path to an image file e.g., '/path/to/example.JPG'.
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
  Returns:
    image_buffer: string, JPEG encoding of RGB image (resized to
      RESIZE_HEIGHT x RESIZE_WIDTH).
    height: integer, image height in pixels.
    width: integer, image width in pixels.
  """
  # Read the image file.
  with tf.gfile.FastGFile(filename, 'rb') as f:
    image_data = f.read()
  # Convert any PNG to JPEG's for consistency.
  if _is_png(filename):
    print('Converting PNG to JPEG for %s' % filename)
    image_data = coder.png_to_jpeg(image_data)
  # Decode the RGB JPEG.
  image = coder.resample_jpeg(image_data)
  return image, RESIZE_HEIGHT, RESIZE_WIDTH
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               labels, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.
  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run index is within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)
  # Evenly split this thread's file range across its shards.
  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
  counter = 0
  for s in xrange(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_dir, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)
    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = int(labels[i])
      image_buffer, height, width = _process_image(filename, coder)
      example = _convert_to_example(filename, image_buffer, label,
                                    height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1
      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()
    writer.close()
    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  # NOTE(review): the final message reports num_files_in_thread where the
  # wording says "shards" — looks like a mismatched format arg; confirm.
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()
def _process_image_files(name, filenames, labels, num_shards):
  """Process and save list of images as TFRecord of Example protos.
  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(labels)
  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  # FIX: use the builtin int — the np.int alias was removed in NumPy 1.24.
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  for i in xrange(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i+1]])
  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()
  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()
  coder = ImageCoder()
  threads = []
  for thread_index in xrange(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames, labels, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)
  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush()
def _find_image_files(list_file, data_dir):
  """Read a 'relative-path label' list file and return existing image paths.

  Args:
    list_file: text file with one 'path label' pair per line.
    data_dir: root directory the relative paths are resolved against.
  Returns:
    Tuple (filenames, labels), shuffled with a fixed seed for repeatability.
    Entries whose file does not exist on disk are silently dropped.
  """
  print('Determining list of input files and labels from %s.' % list_file)
  files_labels = [l.strip().split(' ') for l in tf.gfile.FastGFile(
    list_file, 'r').readlines()]
  labels = []
  filenames = []
  # Leave label index 0 empty as a background class.
  # NOTE(review): label_index is assigned but never used below.
  label_index = 1
  # Construct the list of JPEG files and labels.
  for path, label in files_labels:
    jpeg_file_path = '%s/%s' % (data_dir, path)
    if os.path.exists(jpeg_file_path):
      filenames.append(jpeg_file_path)
      labels.append(label)
  unique_labels = set(labels)
  # Shuffle the ordering of all image files in order to guarantee
  # random ordering of the images with respect to label in the
  # saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
  random.seed(12345)
  random.shuffle(shuffled_index)
  filenames = [filenames[i] for i in shuffled_index]
  labels = [labels[i] for i in shuffled_index]
  print('Found %d JPEG files across %d labels inside %s.' %
        (len(filenames), len(unique_labels), data_dir))
  return filenames, labels
def _process_dataset(name, filename, directory, num_shards):
  """Process a complete data set and save it as a TFRecord.
  Args:
    name: string, unique identifier specifying the data set.
    filename: string, path to the 'path label' list file for this split.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
  Returns:
    Tuple (number of examples written, set of unique labels seen).
  """
  filenames, labels = _find_image_files(filename, directory)
  _process_image_files(name, filenames, labels, num_shards)
  unique_labels = set(labels)
  return len(labels), unique_labels
def main(unused_argv):
  """Validate shard/thread flags, create the output dir, convert both splits,
  and write the md.json metadata file used by training/evaluation."""
  assert not FLAGS.train_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
  assert not FLAGS.valid_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with '
      'FLAGS.valid_shards')
  print('Saving results to %s' % FLAGS.output_dir)
  # Idiomatic existence check (was `os.path.exists(...) is False`).
  if not os.path.exists(FLAGS.output_dir):
    print('creating %s' % FLAGS.output_dir)
    os.makedirs(FLAGS.output_dir)
  # Run it!
  valid, valid_outcomes = _process_dataset('validation', '%s/%s' % (FLAGS.fold_dir, FLAGS.valid_list), FLAGS.data_dir,
                                           FLAGS.valid_shards)
  train, train_outcomes = _process_dataset('train', '%s/%s' % (FLAGS.fold_dir, FLAGS.train_list), FLAGS.data_dir,
                                           FLAGS.train_shards)
  # Warn about labels that appear in training but never in validation.
  if len(valid_outcomes) != len(valid_outcomes | train_outcomes):
    print('Warning: unattested labels in training data [%s]' % (', '.join((valid_outcomes | train_outcomes) - valid_outcomes)))
  output_file = os.path.join(FLAGS.output_dir, 'md.json')
  md = { 'num_valid_shards': FLAGS.valid_shards,
         'num_train_shards': FLAGS.train_shards,
         'valid_counts': valid,
         'train_counts': train,
         'timestamp': str(datetime.now()),
         'nlabels': len(train_outcomes) }
  with open(output_file, 'w') as f:
    json.dump(md, f)
if __name__ == '__main__':
tf.app.run()
| 12,580 | 38.071429 | 137 | py |
rude-carnie | rude-carnie-master/dlibdetect.py | from detect import ObjectDetector
import dlib
import cv2
FACE_PAD = 50
class FaceDetectorDlib(ObjectDetector):
    """Face detector backed by dlib's frontal-face HOG detector.

    Crops each detected face (with FACE_PAD padding) to its own JPEG file
    and writes an annotated copy of the full image.
    """
    def __init__(self, model_name, basename='frontal-face', tgtdir='.'):
        # tgtdir/basename control where and under what prefix crops are saved.
        self.tgtdir = tgtdir
        self.basename = basename
        self.detector = dlib.get_frontal_face_detector()
        # Shape predictor is loaded but not used in run(); kept for parity
        # with the model_name interface.
        self.predictor = dlib.shape_predictor(model_name)
    def run(self, image_file):
        """Detect faces in image_file; return (list of crop paths, annotated image path)."""
        print(image_file)
        img = cv2.imread(image_file)
        # dlib detection runs on grayscale.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.detector(gray, 1)
        images = []
        bb = []
        for (i, rect) in enumerate(faces):
            # Convert dlib rectangle to (x, y, w, h).
            x = rect.left()
            y = rect.top()
            w = rect.right() - x
            h = rect.bottom() - y
            bb.append((x,y,w,h))
            images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))
        print('%d faces detected' % len(images))
        for (x, y, w, h) in bb:
            self.draw_rect(img, x, y, w, h)
        # Fix in case nothing found in the image
        outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
        cv2.imwrite(outfile, img)
        return images, outfile
    def sub_image(self, name, img, x, y, w, h):
        """Write the padded face crop to `name`, clamped to image bounds."""
        upper_cut = [min(img.shape[0], y + h + FACE_PAD), min(img.shape[1], x + w + FACE_PAD)]
        lower_cut = [max(y - FACE_PAD, 0), max(x - FACE_PAD, 0)]
        roi_color = img[lower_cut[0]:upper_cut[0], lower_cut[1]:upper_cut[1]]
        cv2.imwrite(name, roi_color)
        return name
    def draw_rect(self, img, x, y, w, h):
        """Draw the padded bounding box on `img` in place."""
        upper_cut = [min(img.shape[0], y + h + FACE_PAD), min(img.shape[1], x + w + FACE_PAD)]
        lower_cut = [max(y - FACE_PAD, 0), max(x - FACE_PAD, 0)]
        cv2.rectangle(img, (lower_cut[1], lower_cut[0]), (upper_cut[1], upper_cut[0]), (255, 0, 0), 2)
| 1,853 | 36.836735 | 112 | py |
rude-carnie | rude-carnie-master/eval.py | """
At each tick, evaluate the latest checkpoint against some validation data.
Or, you can run once by passing --run_once. OR, you can pass a --requested_step_seq of comma separated checkpoint #s that already exist that it can run in a row.
This program expects a training base directory with the data, and md.json file
There will be sub-directories for each run underneath with the name run-<PID>
where <PID> is the training program's process ID. To run this program, you
will need to pass --train_dir <DIR> which is the base path name, --run_id <PID>
and if you are using a custom name for your checkpoint, you should
pass that as well (most times you probably wont). This will yield a model path:
<DIR>/run-<PID>/checkpoint
Note: If you are training to use the same GPU you can supposedly
suspend the process. I have not found this works reliably on my Linux machine.
Instead, I have found that, often times, the GPU will not reclaim the resources
and in that case, your eval may run out of GPU memory.
You can alternately run trainining for a number of steps, break the program
and run this, then restarting training from the old checkpoint. I also
found this inconvenient. In order to control this better, the program
requires that you explict placement of inference. It defaults to the CPU
so that it can easily run side by side with training. This does make it
much slower than if it was on the GPU, but for evaluation this may not be
a major problem. To place on the gpu, just pass --device_id /gpu:<ID> where
<ID> is the GPU ID
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
from data import inputs
import numpy as np
import tensorflow as tf
from model import select_model, get_checkpoint
import os
import json
tf.app.flags.DEFINE_string('train_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/test_fold_is_0',
'Training directory (where training data lives)')
tf.app.flags.DEFINE_integer('run_id', 0,
'This is the run number (pid) for training proc')
tf.app.flags.DEFINE_string('device_id', '/cpu:0',
'What processing unit to execute inference on')
tf.app.flags.DEFINE_string('eval_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/eval_test_fold_is_0',
'Directory to put output to')
tf.app.flags.DEFINE_string('eval_data', 'valid',
'Data type (valid|train)')
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
'Number of preprocessing threads')
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
tf.app.flags.DEFINE_integer('image_size', 227,
'Image size')
tf.app.flags.DEFINE_integer('batch_size', 128,
'Batch size')
tf.app.flags.DEFINE_string('checkpoint', 'checkpoint',
'Checkpoint basename')
tf.app.flags.DEFINE_string('model_type', 'default',
'Type of convnet')
tf.app.flags.DEFINE_string('requested_step_seq', '', 'Requested step to restore')
FLAGS = tf.app.flags.FLAGS
def eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step=None):
  """Run Eval once.
  Args:
    saver: Saver used to restore the checkpoint.
    summary_writer: Summary writer.
    summary_op: Summary op.
    logits: model output tensor.
    labels: ground-truth label tensor.
    num_eval: number of examples to evaluate.
    requested_step: optional specific checkpoint step to restore.
  """
  # Precision@1 and recall@2 ops over the batch.
  top1 = tf.nn.in_top_k(logits, labels, 1)
  top2 = tf.nn.in_top_k(logits, labels, 2)
  with tf.Session() as sess:
    checkpoint_path = '%s/run-%d' % (FLAGS.train_dir, FLAGS.run_id)
    model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
    saver.restore(sess, model_checkpoint_path)
    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))
      # Round up, so total_sample_count may exceed num_eval by up to a batch.
      num_steps = int(math.ceil(num_eval / FLAGS.batch_size))
      true_count1 = true_count2 = 0
      total_sample_count = num_steps * FLAGS.batch_size
      step = 0
      print(FLAGS.batch_size, num_steps)
      while step < num_steps and not coord.should_stop():
        start_time = time.time()
        v, predictions1, predictions2 = sess.run([logits, top1, top2])
        duration = time.time() - start_time
        sec_per_batch = float(duration)
        examples_per_sec = FLAGS.batch_size / sec_per_batch
        true_count1 += np.sum(predictions1)
        true_count2 += np.sum(predictions2)
        format_str = ('%s (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (datetime.now(),
                            examples_per_sec, sec_per_batch))
        step += 1
      # Compute precision @ 1.
      at1 = true_count1 / total_sample_count
      at2 = true_count2 / total_sample_count
      print('%s: precision @ 1 = %.3f (%d/%d)' % (datetime.now(), at1, true_count1, total_sample_count))
      print('%s: recall @ 2 = %.3f (%d/%d)' % (datetime.now(), at2, true_count2, total_sample_count))
      # Publish the metrics alongside the model's own summaries.
      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='Precision @ 1', simple_value=at1)
      summary.value.add(tag=' Recall @ 2', simple_value=at2)
      summary_writer.add_summary(summary, global_step)
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def evaluate(run_dir):
  """Build the eval graph and repeatedly (or once) evaluate checkpoints.

  Reads md.json from FLAGS.train_dir for label count and example counts,
  then either evaluates each step in FLAGS.requested_step_seq, or loops
  forever (sleeping FLAGS.eval_interval_secs) until FLAGS.run_once.
  """
  with tf.Graph().as_default() as g:
    input_file = os.path.join(FLAGS.train_dir, 'md.json')
    print(input_file)
    with open(input_file, 'r') as f:
      md = json.load(f)
    eval_data = FLAGS.eval_data == 'valid'
    num_eval = md['%s_counts' % FLAGS.eval_data]
    model_fn = select_model(FLAGS.model_type)
    with tf.device(FLAGS.device_id):
      print('Executing on %s' % FLAGS.device_id)
      # train=not eval_data: evaluating the training split uses eval-style
      # (deterministic) preprocessing only when eval_data is True.
      images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, train=not eval_data, num_preprocess_threads=FLAGS.num_preprocess_threads)
      logits = model_fn(md['nlabels'], images, 1, False)
      summary_op = tf.summary.merge_all()
      summary_writer = tf.summary.FileWriter(run_dir, g)
      saver = tf.train.Saver()
      if FLAGS.requested_step_seq:
        sequence = FLAGS.requested_step_seq.split(',')
        for requested_step in sequence:
          # NOTE(review): prints the whole sequence each iteration, not the
          # current step — confirm whether `requested_step` was intended.
          print('Running %s' % sequence)
          eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step)
      else:
        while True:
          print('Running loop')
          eval_once(saver, summary_writer, summary_op, logits, labels, num_eval)
          if FLAGS.run_once:
            break
          time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
  """Recreate the per-run eval output directory and start evaluation."""
  run_dir = '%s/run-%d' % (FLAGS.eval_dir, FLAGS.run_id)
  # Start from a clean directory so stale summaries don't mix in.
  if tf.gfile.Exists(run_dir):
    tf.gfile.DeleteRecursively(run_dir)
  tf.gfile.MakeDirs(run_dir)
  evaluate(run_dir)
if __name__ == '__main__':
tf.app.run()
| 8,021 | 40.138462 | 165 | py |
rude-carnie | rude-carnie-master/train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from datetime import datetime
import time
import os
import numpy as np
import tensorflow as tf
from data import distorted_inputs
from model import select_model
import json
import re
LAMBDA = 0.01
MOM = 0.9
tf.app.flags.DEFINE_string('pre_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
tf.app.flags.DEFINE_string('train_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/test_fold_is_0',
'Training directory')
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
'Number of preprocessing threads')
tf.app.flags.DEFINE_string('optim', 'Momentum',
'Optimizer')
tf.app.flags.DEFINE_integer('image_size', 227,
'Image size')
tf.app.flags.DEFINE_float('eta', 0.01,
'Learning rate')
tf.app.flags.DEFINE_float('pdrop', 0.,
'Dropout probability')
tf.app.flags.DEFINE_integer('max_steps', 40000,
'Number of iterations')
tf.app.flags.DEFINE_integer('steps_per_decay', 10000,
'Number of steps before learning rate decay')
tf.app.flags.DEFINE_float('eta_decay_rate', 0.1,
'Learning rate decay')
tf.app.flags.DEFINE_integer('epochs', -1,
'Number of epochs')
tf.app.flags.DEFINE_integer('batch_size', 128,
'Batch size')
tf.app.flags.DEFINE_string('checkpoint', 'checkpoint',
'Checkpoint name')
tf.app.flags.DEFINE_string('model_type', 'default',
'Type of convnet')
tf.app.flags.DEFINE_string('pre_model',
'',#'./inception_v3.ckpt',
'checkpoint file')
FLAGS = tf.app.flags.FLAGS
# Every 5k steps cut learning rate in half
def exponential_staircase_decay(at_step=10000, decay_rate=0.1):
    """Return a schedule fn that multiplies the learning rate by
    `decay_rate` every `at_step` global steps (staircase mode)."""
    print('decay [%f] every [%d] steps' % (decay_rate, at_step))
    def _schedule(learning_rate, global_step):
        # Staircase mode drops the rate in discrete jumps rather than smoothly.
        return tf.train.exponential_decay(
            learning_rate, global_step, at_step, decay_rate, staircase=True)
    return _schedule
def optimizer(optim, eta, loss_fn, at_step, decay_rate):
    """Build the training op for the given optimizer name.

    Args:
        optim: 'Adadelta' or 'Momentum' for a configured optimizer with the
            project's defaults; any other value is passed straight through to
            tf.contrib.layers.optimize_loss as the optimizer spec.
        eta: initial learning rate.
        loss_fn: scalar loss tensor to minimize.
        at_step: steps between learning-rate decays (Momentum only).
        decay_rate: multiplicative decay factor (Momentum only).
    Returns:
        The optimize_loss training op (with gradient clipping at 4.0).
    """
    global_step = tf.Variable(0, trainable=False)
    optz = optim
    # BUG FIX: lr_decay_fn was only assigned inside the two branches, so any
    # other optimizer name raised NameError at the optimize_loss call.
    lr_decay_fn = None
    if optim == 'Adadelta':
        optz = lambda lr: tf.train.AdadeltaOptimizer(lr, 0.95, 1e-6)
        lr_decay_fn = None
    elif optim == 'Momentum':
        optz = lambda lr: tf.train.MomentumOptimizer(lr, MOM)
        lr_decay_fn = exponential_staircase_decay(at_step, decay_rate)
    return tf.contrib.layers.optimize_loss(loss_fn, global_step, eta, optz, clip_gradients=4., learning_rate_decay_fn=lr_decay_fn)
def loss(logits, labels):
    """Cross-entropy loss with L2 regularization and moving-average summaries.

    Args:
        logits: 2-D float tensor of unnormalized class scores.
        labels: 1-D integer class labels (cast to int32 here).
    Returns:
        Scalar total loss tensor, gated on the moving-average update op.
    """
    labels = tf.cast(labels, tf.int32)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    losses = tf.get_collection('losses')
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # Total = data loss + LAMBDA-weighted sum of all regularization terms.
    total_loss = cross_entropy_mean + LAMBDA * sum(regularization_losses)
    tf.summary.scalar('tl (raw)', total_loss)
    #total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
    # Track exponential moving averages of the individual and total losses
    # for smoother TensorBoard curves.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    for l in losses + [total_loss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    # Ensure the averages are updated whenever the loss is evaluated.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
def main(argv=None):
with tf.Graph().as_default():
model_fn = select_model(FLAGS.model_type)
# Open the metadata file and figure out nlabels, and size of epoch
input_file = os.path.join(FLAGS.train_dir, 'md.json')
print(input_file)
with open(input_file, 'r') as f:
md = json.load(f)
images, labels, _ = distorted_inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, FLAGS.num_preprocess_threads)
logits = model_fn(md['nlabels'], images, 1-FLAGS.pdrop, True)
total_loss = loss(logits, labels)
train_op = optimizer(FLAGS.optim, FLAGS.eta, total_loss, FLAGS.steps_per_decay, FLAGS.eta_decay_rate)
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
sess = tf.Session(config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement))
tf.global_variables_initializer().run(session=sess)
# This is total hackland, it only works to fine-tune iv3
if FLAGS.pre_model:
inception_variables = tf.get_collection(
tf.GraphKeys.VARIABLES, scope="InceptionV3")
restorer = tf.train.Saver(inception_variables)
restorer.restore(sess, FLAGS.pre_model)
if FLAGS.pre_checkpoint_path:
if tf.gfile.Exists(FLAGS.pre_checkpoint_path) is True:
print('Trying to restore checkpoint from %s' % FLAGS.pre_checkpoint_path)
restorer = tf.train.Saver()
tf.train.latest_checkpoint(FLAGS.pre_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), FLAGS.pre_checkpoint_path))
run_dir = '%s/run-%d' % (FLAGS.train_dir, os.getpid())
checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
if tf.gfile.Exists(run_dir) is False:
print('Creating %s' % run_dir)
tf.gfile.MakeDirs(run_dir)
tf.train.write_graph(sess.graph_def, run_dir, 'model.pb', as_text=True)
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(run_dir, sess.graph)
steps_per_train_epoch = int(md['train_counts'] / FLAGS.batch_size)
num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
print('Requested number of steps [%d]' % num_steps)
for step in xrange(num_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, total_loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.3f (%.1f examples/sec; %.3f ' 'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
# Loss only actually evaluated every 100 steps?
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
if step % 1000 == 0 or (step + 1) == num_steps:
saver.save(sess, checkpoint_path, global_step=step)
if __name__ == '__main__':
tf.app.run()
| 7,702 | 38.911917 | 130 | py |
rude-carnie | rude-carnie-master/guess.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
from data import inputs
import numpy as np
import tensorflow as tf
from model import select_model, get_checkpoint
from utils import *
import os
import json
import csv
RESIZE_FINAL = 227
GENDER_LIST =['M','F']
AGE_LIST = ['(0, 2)','(4, 6)','(8, 12)','(15, 20)','(25, 32)','(38, 43)','(48, 53)','(60, 100)']
MAX_BATCH_SZ = 128
tf.app.flags.DEFINE_string('model_dir', '',
'Model directory (where training data lives)')
tf.app.flags.DEFINE_string('class_type', 'age',
'Classification type (age|gender)')
tf.app.flags.DEFINE_string('device_id', '/cpu:0',
'What processing unit to execute inference on')
tf.app.flags.DEFINE_string('filename', '',
'File (Image) or File list (Text/No header TSV) to process')
tf.app.flags.DEFINE_string('target', '',
'CSV file containing the filename processed along with best guess and score')
tf.app.flags.DEFINE_string('checkpoint', 'checkpoint',
'Checkpoint basename')
tf.app.flags.DEFINE_string('model_type', 'default',
'Type of convnet')
tf.app.flags.DEFINE_string('requested_step', '', 'Within the model directory, a requested step to restore e.g., 9000')
tf.app.flags.DEFINE_boolean('single_look', False, 'single look at the image or multiple crops')
tf.app.flags.DEFINE_string('face_detection_model', '', 'Do frontal face detection with model specified')
tf.app.flags.DEFINE_string('face_detection_type', 'cascade', 'Face detection model type (yolo_tiny|cascade)')
FLAGS = tf.app.flags.FLAGS
def one_of(fname, types):
return any([fname.endswith('.' + ty) for ty in types])
def resolve_file(fname):
if os.path.exists(fname): return fname
for suffix in ('.jpg', '.png', '.JPG', '.PNG', '.jpeg'):
cand = fname + suffix
if os.path.exists(cand):
return cand
return None
def classify_many_single_crop(sess, label_list, softmax_output, coder, images, image_files, writer):
try:
num_batches = math.ceil(len(image_files) / MAX_BATCH_SZ)
pg = ProgressBar(num_batches)
for j in range(num_batches):
start_offset = j * MAX_BATCH_SZ
end_offset = min((j + 1) * MAX_BATCH_SZ, len(image_files))
batch_image_files = image_files[start_offset:end_offset]
print(start_offset, end_offset, len(batch_image_files))
image_batch = make_multi_image_batch(batch_image_files, coder)
batch_results = sess.run(softmax_output, feed_dict={images:image_batch.eval()})
batch_sz = batch_results.shape[0]
for i in range(batch_sz):
output_i = batch_results[i]
best_i = np.argmax(output_i)
best_choice = (label_list[best_i], output_i[best_i])
print('Guess @ 1 %s, prob = %.2f' % best_choice)
if writer is not None:
f = batch_image_files[i]
writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
pg.update()
pg.done()
except Exception as e:
print(e)
print('Failed to run all images')
def classify_one_multi_crop(sess, label_list, softmax_output, coder, images, image_file, writer):
try:
print('Running file %s' % image_file)
image_batch = make_multi_crop_batch(image_file, coder)
batch_results = sess.run(softmax_output, feed_dict={images:image_batch.eval()})
output = batch_results[0]
batch_sz = batch_results.shape[0]
for i in range(1, batch_sz):
output = output + batch_results[i]
output /= batch_sz
best = np.argmax(output)
best_choice = (label_list[best], output[best])
print('Guess @ 1 %s, prob = %.2f' % best_choice)
nlabels = len(label_list)
if nlabels > 2:
output[best] = 0
second_best = np.argmax(output)
print('Guess @ 2 %s, prob = %.2f' % (label_list[second_best], output[second_best]))
if writer is not None:
writer.writerow((image_file, best_choice[0], '%.2f' % best_choice[1]))
except Exception as e:
print(e)
print('Failed to run image %s ' % image_file)
def list_images(srcfile):
with open(srcfile, 'r') as csvfile:
delim = ',' if srcfile.endswith('.csv') else '\t'
reader = csv.reader(csvfile, delimiter=delim)
if srcfile.endswith('.csv') or srcfile.endswith('.tsv'):
print('skipping header')
_ = next(reader)
return [row[0] for row in reader]
def main(argv=None): # pylint: disable=unused-argument
files = []
if FLAGS.face_detection_model:
print('Using face detector (%s) %s' % (FLAGS.face_detection_type, FLAGS.face_detection_model))
face_detect = face_detection_model(FLAGS.face_detection_type, FLAGS.face_detection_model)
face_files, rectangles = face_detect.run(FLAGS.filename)
print(face_files)
files += face_files
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
nlabels = len(label_list)
print('Executing on %s' % FLAGS.device_id)
model_fn = select_model(FLAGS.model_type)
with tf.device(FLAGS.device_id):
images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
logits = model_fn(nlabels, images, 1, False)
init = tf.global_variables_initializer()
requested_step = FLAGS.requested_step if FLAGS.requested_step else None
checkpoint_path = '%s' % (FLAGS.model_dir)
model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
saver = tf.train.Saver()
saver.restore(sess, model_checkpoint_path)
softmax_output = tf.nn.softmax(logits)
coder = ImageCoder()
# Support a batch mode if no face detection model
if len(files) == 0:
if (os.path.isdir(FLAGS.filename)):
for relpath in os.listdir(FLAGS.filename):
abspath = os.path.join(FLAGS.filename, relpath)
if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
print(abspath)
files.append(abspath)
else:
files.append(FLAGS.filename)
# If it happens to be a list file, read the list and clobber the files
if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
files = list_images(FLAGS.filename)
writer = None
output = None
if FLAGS.target:
print('Creating output file %s' % FLAGS.target)
output = open(FLAGS.target, 'w')
writer = csv.writer(output)
writer.writerow(('file', 'label', 'score'))
image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
print(image_files)
if FLAGS.single_look:
classify_many_single_crop(sess, label_list, softmax_output, coder, images, image_files, writer)
else:
for image_file in image_files:
classify_one_multi_crop(sess, label_list, softmax_output, coder, images, image_file, writer)
if output is not None:
output.close()
if __name__ == '__main__':
tf.app.run()
| 8,091 | 37.903846 | 136 | py |
AdaptiveGCL | AdaptiveGCL-main/Params.py | import argparse
def ParseArgs():
parser = argparse.ArgumentParser(description='Model Params')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--batch', default=4096, type=int, help='batch size')
parser.add_argument('--tstBat', default=256, type=int, help='number of users in a testing batch')
parser.add_argument('--reg', default=1e-5, type=float, help='weight decay regularizer')
parser.add_argument('--epoch', default=200, type=int, help='number of epochs')
parser.add_argument('--latdim', default=32, type=int, help='embedding size')
parser.add_argument('--gnn_layer', default=2, type=int, help='number of gnn layers')
parser.add_argument('--topk', default=20, type=int, help='K of top K')
parser.add_argument('--data', default='yelp', type=str, help='name of dataset')
parser.add_argument('--ssl_reg', default=0.1, type=float, help='weight for contrative learning')
parser.add_argument("--ib_reg", type=float, default=0.1, help='weight for information bottleneck')
parser.add_argument('--temp', default=0.5, type=float, help='temperature in contrastive learning')
parser.add_argument('--tstEpoch', default=1, type=int, help='number of epoch to test while training')
parser.add_argument('--gpu', default=-1, type=int, help='indicates which gpu to use')
parser.add_argument('--lambda0', type=float, default=1e-4, help='weight for L0 loss on laplacian matrix.')
parser.add_argument('--gamma', type=float, default=-0.45)
parser.add_argument('--zeta', type=float, default=1.05)
parser.add_argument('--init_temperature', type=float, default=2.0)
parser.add_argument('--temperature_decay', type=float, default=0.98)
parser.add_argument("--eps", type=float, default=1e-8)
return parser.parse_args()
args = ParseArgs()
| 1,788 | 62.892857 | 107 | py |
AdaptiveGCL | AdaptiveGCL-main/DataHandler.py | import pickle
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, dok_matrix
from Params import args
import scipy.sparse as sp
from Utils.TimeLogger import log
import torch as t
import torch.utils.data as data
import torch.utils.data as dataloader
class DataHandler:
def __init__(self):
if args.data == 'yelp':
predir = './Datasets/sparse_yelp/'
elif args.data == 'lastfm':
predir = './Datasets/lastFM/'
elif args.data == 'beer':
predir = './Datasets/beerAdvocate/'
self.predir = predir
self.trnfile = predir + 'trnMat.pkl'
self.tstfile = predir + 'tstMat.pkl'
def loadOneFile(self, filename):
with open(filename, 'rb') as fs:
ret = (pickle.load(fs) != 0).astype(np.float32)
if type(ret) != coo_matrix:
ret = sp.coo_matrix(ret)
return ret
def normalizeAdj(self, mat):
degree = np.array(mat.sum(axis=-1))
dInvSqrt = np.reshape(np.power(degree, -0.5), [-1])
dInvSqrt[np.isinf(dInvSqrt)] = 0.0
dInvSqrtMat = sp.diags(dInvSqrt)
return mat.dot(dInvSqrtMat).transpose().dot(dInvSqrtMat).tocoo()
def makeTorchAdj(self, mat):
# make ui adj
a = sp.csr_matrix((args.user, args.user))
b = sp.csr_matrix((args.item, args.item))
mat = sp.vstack([sp.hstack([a, mat]), sp.hstack([mat.transpose(), b])])
mat = (mat != 0) * 1.0
mat = (mat + sp.eye(mat.shape[0])) * 1.0
mat = self.normalizeAdj(mat)
# make cuda tensor
idxs = t.from_numpy(np.vstack([mat.row, mat.col]).astype(np.int64))
vals = t.from_numpy(mat.data.astype(np.float32))
shape = t.Size(mat.shape)
return t.sparse.FloatTensor(idxs, vals, shape).cuda()
def LoadData(self):
trnMat = self.loadOneFile(self.trnfile)
tstMat = self.loadOneFile(self.tstfile)
self.trnMat = trnMat
args.user, args.item = trnMat.shape
self.torchBiAdj = self.makeTorchAdj(trnMat)
trnData = TrnData(trnMat)
self.trnLoader = dataloader.DataLoader(trnData, batch_size=args.batch, shuffle=True, num_workers=0)
tstData = TstData(tstMat, trnMat)
self.tstLoader = dataloader.DataLoader(tstData, batch_size=args.tstBat, shuffle=False, num_workers=0)
class TrnData(data.Dataset):
def __init__(self, coomat):
self.rows = coomat.row
self.cols = coomat.col
self.dokmat = coomat.todok()
self.negs = np.zeros(len(self.rows)).astype(np.int32)
def negSampling(self):
for i in range(len(self.rows)):
u = self.rows[i]
while True:
iNeg = np.random.randint(args.item)
if (u, iNeg) not in self.dokmat:
break
self.negs[i] = iNeg
def __len__(self):
return len(self.rows)
def __getitem__(self, idx):
return self.rows[idx], self.cols[idx], self.negs[idx]
class TstData(data.Dataset):
def __init__(self, coomat, trnMat):
self.csrmat = (trnMat.tocsr() != 0) * 1.0
tstLocs = [None] * coomat.shape[0]
tstUsrs = set()
for i in range(len(coomat.data)):
row = coomat.row[i]
col = coomat.col[i]
if tstLocs[row] is None:
tstLocs[row] = list()
tstLocs[row].append(col)
tstUsrs.add(row)
tstUsrs = np.array(list(tstUsrs))
self.tstUsrs = tstUsrs
self.tstLocs = tstLocs
def __len__(self):
return len(self.tstUsrs)
def __getitem__(self, idx):
return self.tstUsrs[idx], np.reshape(self.csrmat[self.tstUsrs[idx]].toarray(), [-1]) | 3,205 | 29.245283 | 103 | py |
AdaptiveGCL | AdaptiveGCL-main/Main.py | import torch
import Utils.TimeLogger as logger
from Utils.TimeLogger import log
from Params import args
from Model import Model, vgae_encoder, vgae_decoder, vgae, DenoisingNet
from DataHandler import DataHandler
import numpy as np
from Utils.Utils import calcRegLoss, pairPredict
import os
from copy import deepcopy
import scipy.sparse as sp
import random
class Coach:
def __init__(self, handler):
self.handler = handler
print('USER', args.user, 'ITEM', args.item)
print('NUM OF INTERACTIONS', self.handler.trnLoader.dataset.__len__())
self.metrics = dict()
mets = ['Loss', 'preLoss', 'Recall', 'NDCG']
for met in mets:
self.metrics['Train' + met] = list()
self.metrics['Test' + met] = list()
def makePrint(self, name, ep, reses, save):
ret = 'Epoch %d/%d, %s: ' % (ep, args.epoch, name)
for metric in reses:
val = reses[metric]
ret += '%s = %.4f, ' % (metric, val)
tem = name + metric
if save and tem in self.metrics:
self.metrics[tem].append(val)
ret = ret[:-2] + ' '
return ret
def run(self):
self.prepareModel()
log('Model Prepared')
recallMax = 0
ndcgMax = 0
bestEpoch = 0
stloc = 0
log('Model Initialized')
for ep in range(stloc, args.epoch):
temperature = max(0.05, args.init_temperature * pow(args.temperature_decay, ep))
tstFlag = (ep % args.tstEpoch == 0)
reses = self.trainEpoch(temperature)
log(self.makePrint('Train', ep, reses, tstFlag))
if tstFlag:
reses = self.testEpoch()
if (reses['Recall'] > recallMax):
recallMax = reses['Recall']
ndcgMax = reses['NDCG']
bestEpoch = ep
log(self.makePrint('Test', ep, reses, tstFlag))
print()
print('Best epoch : ', bestEpoch, ' , Recall : ', recallMax, ' , NDCG : ', ndcgMax)
def prepareModel(self):
self.model = Model().cuda()
encoder = vgae_encoder().cuda()
decoder = vgae_decoder().cuda()
self.generator_1 = vgae(encoder, decoder).cuda()
self.generator_2 = DenoisingNet(self.model.getGCN(), self.model.getEmbeds()).cuda()
self.generator_2.set_fea_adj(args.user+args.item, deepcopy(self.handler.torchBiAdj).cuda())
self.opt = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=0)
self.opt_gen_1 = torch.optim.Adam(self.generator_1.parameters(), lr=args.lr, weight_decay=0)
self.opt_gen_2 = torch.optim.Adam(filter(lambda p: p.requires_grad, self.generator_2.parameters()), lr=args.lr, weight_decay=0, eps=args.eps)
def trainEpoch(self, temperature):
trnLoader = self.handler.trnLoader
trnLoader.dataset.negSampling()
generate_loss_1, generate_loss_2, bpr_loss, im_loss, ib_loss, reg_loss = 0, 0, 0, 0, 0, 0
steps = trnLoader.dataset.__len__() // args.batch
for i, tem in enumerate(trnLoader):
data = deepcopy(self.handler.torchBiAdj).cuda()
data1 = self.generator_generate(self.generator_1)
self.opt.zero_grad()
self.opt_gen_1.zero_grad()
self.opt_gen_2.zero_grad()
ancs, poss, negs = tem
ancs = ancs.long().cuda()
poss = poss.long().cuda()
negs = negs.long().cuda()
out1 = self.model.forward_graphcl(data1)
out2 = self.model.forward_graphcl_(self.generator_2)
loss = self.model.loss_graphcl(out1, out2, ancs, poss).mean() * args.ssl_reg
im_loss += float(loss)
loss.backward()
# info bottleneck
_out1 = self.model.forward_graphcl(data1)
_out2 = self.model.forward_graphcl_(self.generator_2)
loss_ib = self.model.loss_graphcl(_out1, out1.detach(), ancs, poss) + self.model.loss_graphcl(_out2, out2.detach(), ancs, poss)
loss= loss_ib.mean() * args.ib_reg
ib_loss += float(loss)
loss.backward()
# BPR
usrEmbeds, itmEmbeds = self.model.forward_gcn(data)
ancEmbeds = usrEmbeds[ancs]
posEmbeds = itmEmbeds[poss]
negEmbeds = itmEmbeds[negs]
scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch
regLoss = calcRegLoss(self.model) * args.reg
loss = bprLoss + regLoss
bpr_loss += float(bprLoss)
reg_loss += float(regLoss)
loss.backward()
loss_1 = self.generator_1(deepcopy(self.handler.torchBiAdj).cuda(), ancs, poss, negs)
loss_2 = self.generator_2(ancs, poss, negs, temperature)
loss = loss_1 + loss_2
generate_loss_1 += float(loss_1)
generate_loss_2 += float(loss_2)
loss.backward()
self.opt.step()
self.opt_gen_1.step()
self.opt_gen_2.step()
log('Step %d/%d: gen 1 : %.3f ; gen 2 : %.3f ; bpr : %.3f ; im : %.3f ; ib : %.3f ; reg : %.3f ' % (
i,
steps,
generate_loss_1,
generate_loss_2,
bpr_loss,
im_loss,
ib_loss,
reg_loss,
), save=False, oneline=True)
ret = dict()
ret['Gen_1 Loss'] = generate_loss_1 / steps
ret['Gen_2 Loss'] = generate_loss_2 / steps
ret['BPR Loss'] = bpr_loss / steps
ret['IM Loss'] = im_loss / steps
ret['IB Loss'] = ib_loss / steps
ret['Reg Loss'] = reg_loss / steps
return ret
def testEpoch(self):
tstLoader = self.handler.tstLoader
epRecall, epNdcg = [0] * 2
i = 0
num = tstLoader.dataset.__len__()
steps = num // args.tstBat
for usr, trnMask in tstLoader:
i += 1
usr = usr.long().cuda()
trnMask = trnMask.cuda()
usrEmbeds, itmEmbeds = self.model.forward_gcn(self.handler.torchBiAdj)
allPreds = torch.mm(usrEmbeds[usr], torch.transpose(itmEmbeds, 1, 0)) * (1 - trnMask) - trnMask * 1e8
_, topLocs = torch.topk(allPreds, args.topk)
recall, ndcg = self.calcRes(topLocs.cpu().numpy(), self.handler.tstLoader.dataset.tstLocs, usr)
epRecall += recall
epNdcg += ndcg
log('Steps %d/%d: recall = %.2f, ndcg = %.2f ' % (i, steps, recall, ndcg), save=False, oneline=True)
ret = dict()
ret['Recall'] = epRecall / num
ret['NDCG'] = epNdcg / num
return ret
def calcRes(self, topLocs, tstLocs, batIds):
assert topLocs.shape[0] == len(batIds)
allRecall = allNdcg = 0
for i in range(len(batIds)):
temTopLocs = list(topLocs[i])
temTstLocs = tstLocs[batIds[i]]
tstNum = len(temTstLocs)
maxDcg = np.sum([np.reciprocal(np.log2(loc + 2)) for loc in range(min(tstNum, args.topk))])
recall = dcg = 0
for val in temTstLocs:
if val in temTopLocs:
recall += 1
dcg += np.reciprocal(np.log2(temTopLocs.index(val) + 2))
recall = recall / tstNum
ndcg = dcg / maxDcg
allRecall += recall
allNdcg += ndcg
return allRecall, allNdcg
def generator_generate(self, generator):
edge_index = []
edge_index.append([])
edge_index.append([])
adj = deepcopy(self.handler.torchBiAdj)
idxs = adj._indices()
with torch.no_grad():
view = generator.generate(self.handler.torchBiAdj, idxs, adj)
return view
if __name__ == '__main__':
with torch.cuda.device(args.gpu):
logger.saveDefault = True
log('Start')
handler = DataHandler()
handler.LoadData()
log('Load Data')
coach = Coach(handler)
coach.run() | 6,846 | 29.9819 | 143 | py |
AdaptiveGCL | AdaptiveGCL-main/Model.py | from torch import nn
import torch.nn.functional as F
import torch
from Params import args
from copy import deepcopy
import numpy as np
import math
import scipy.sparse as sp
from Utils.Utils import contrastLoss, calcRegLoss, pairPredict
import time
import torch_sparse
init = nn.init.xavier_uniform_
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.uEmbeds = nn.Parameter(init(torch.empty(args.user, args.latdim)))
self.iEmbeds = nn.Parameter(init(torch.empty(args.item, args.latdim)))
self.gcnLayers = nn.Sequential(*[GCNLayer() for i in range(args.gnn_layer)])
def forward_gcn(self, adj):
iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)
embedsLst = [iniEmbeds]
for gcn in self.gcnLayers:
embeds = gcn(adj, embedsLst[-1])
embedsLst.append(embeds)
mainEmbeds = sum(embedsLst)
return mainEmbeds[:args.user], mainEmbeds[args.user:]
def forward_graphcl(self, adj):
iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)
embedsLst = [iniEmbeds]
for gcn in self.gcnLayers:
embeds = gcn(adj, embedsLst[-1])
embedsLst.append(embeds)
mainEmbeds = sum(embedsLst)
return mainEmbeds
def forward_graphcl_(self, generator):
iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)
embedsLst = [iniEmbeds]
count = 0
for gcn in self.gcnLayers:
with torch.no_grad():
adj = generator.generate(x=embedsLst[-1], layer=count)
embeds = gcn(adj, embedsLst[-1])
embedsLst.append(embeds)
count += 1
mainEmbeds = sum(embedsLst)
return mainEmbeds
def loss_graphcl(self, x1, x2, users, items):
T = args.temp
user_embeddings1, item_embeddings1 = torch.split(x1, [args.user, args.item], dim=0)
user_embeddings2, item_embeddings2 = torch.split(x2, [args.user, args.item], dim=0)
user_embeddings1 = F.normalize(user_embeddings1, dim=1)
item_embeddings1 = F.normalize(item_embeddings1, dim=1)
user_embeddings2 = F.normalize(user_embeddings2, dim=1)
item_embeddings2 = F.normalize(item_embeddings2, dim=1)
user_embs1 = F.embedding(users, user_embeddings1)
item_embs1 = F.embedding(items, item_embeddings1)
user_embs2 = F.embedding(users, user_embeddings2)
item_embs2 = F.embedding(items, item_embeddings2)
all_embs1 = torch.cat([user_embs1, item_embs1], dim=0)
all_embs2 = torch.cat([user_embs2, item_embs2], dim=0)
all_embs1_abs = all_embs1.norm(dim=1)
all_embs2_abs = all_embs2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', all_embs1, all_embs2) / torch.einsum('i,j->ij', all_embs1_abs, all_embs2_abs)
sim_matrix = torch.exp(sim_matrix / T)
pos_sim = sim_matrix[np.arange(all_embs1.shape[0]), np.arange(all_embs1.shape[0])]
loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
loss = - torch.log(loss)
return loss
def getEmbeds(self):
self.unfreeze(self.gcnLayers)
return torch.concat([self.uEmbeds, self.iEmbeds], axis=0)
def unfreeze(self, layer):
for child in layer.children():
for param in child.parameters():
param.requires_grad = True
def getGCN(self):
return self.gcnLayers
class GCNLayer(nn.Module):
def __init__(self):
super(GCNLayer, self).__init__()
def forward(self, adj, embeds, flag=True):
if (flag):
return torch.spmm(adj, embeds)
else:
return torch_sparse.spmm(adj.indices(), adj.values(), adj.shape[0], adj.shape[1], embeds)
class vgae_encoder(Model):
def __init__(self):
super(vgae_encoder, self).__init__()
hidden = args.latdim
self.encoder_mean = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, hidden))
self.encoder_std = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, hidden), nn.Softplus())
def forward(self, adj):
x = self.forward_graphcl(adj)
x_mean = self.encoder_mean(x)
x_std = self.encoder_std(x)
gaussian_noise = torch.randn(x_mean.shape).cuda()
x = gaussian_noise * x_std + x_mean
return x, x_mean, x_std
class vgae_decoder(nn.Module):
def __init__(self, hidden=args.latdim):
super(vgae_decoder, self).__init__()
self.decoder = nn.Sequential(nn.ReLU(inplace=True), nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, 1))
self.sigmoid = nn.Sigmoid()
self.bceloss = nn.BCELoss(reduction='none')
def forward(self, x, x_mean, x_std, users, items, neg_items, encoder):
x_user, x_item = torch.split(x, [args.user, args.item], dim=0)
edge_pos_pred = self.sigmoid(self.decoder(x_user[users] * x_item[items]))
edge_neg_pred = self.sigmoid(self.decoder(x_user[users] * x_item[neg_items]))
loss_edge_pos = self.bceloss( edge_pos_pred, torch.ones(edge_pos_pred.shape).cuda() )
loss_edge_neg = self.bceloss( edge_neg_pred, torch.zeros(edge_neg_pred.shape).cuda() )
loss_rec = loss_edge_pos + loss_edge_neg
kl_divergence = - 0.5 * (1 + 2 * torch.log(x_std) - x_mean**2 - x_std**2).sum(dim=1)
ancEmbeds = x_user[users]
posEmbeds = x_item[items]
negEmbeds = x_item[neg_items]
scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch
regLoss = calcRegLoss(encoder) * args.reg
beta = 0.1
loss = (loss_rec + beta * kl_divergence.mean() + bprLoss + regLoss).mean()
return loss
class vgae(nn.Module):
def __init__(self, encoder, decoder):
super(vgae, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, data, users, items, neg_items):
x, x_mean, x_std = self.encoder(data)
loss = self.decoder(x, x_mean, x_std, users, items, neg_items, self.encoder)
return loss
def generate(self, data, edge_index, adj):
x, _, _ = self.encoder(data)
edge_pred = self.decoder.sigmoid(self.decoder.decoder(x[edge_index[0]] * x[edge_index[1]]))
vals = adj._values()
idxs = adj._indices()
edgeNum = vals.size()
edge_pred = edge_pred[:, 0]
mask = ((edge_pred + 0.5).floor()).type(torch.bool)
newVals = vals[mask]
newVals = newVals / (newVals.shape[0] / edgeNum[0])
newIdxs = idxs[:, mask]
return torch.sparse.FloatTensor(newIdxs, newVals, adj.shape)
class DenoisingNet(nn.Module):
def __init__(self, gcnLayers, features):
super(DenoisingNet, self).__init__()
self.features = features
self.gcnLayers = gcnLayers
self.edge_weights = []
self.nblayers = []
self.selflayers = []
self.attentions = []
self.attentions.append([])
self.attentions.append([])
hidden = args.latdim
self.nblayers_0 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
self.nblayers_1 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
self.selflayers_0 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
self.selflayers_1 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
self.attentions_0 = nn.Sequential(nn.Linear( 2 * hidden, 1))
self.attentions_1 = nn.Sequential(nn.Linear( 2 * hidden, 1))
def freeze(self, layer):
for child in layer.children():
for param in child.parameters():
param.requires_grad = False
def get_attention(self, input1, input2, layer=0):
if layer == 0:
nb_layer = self.nblayers_0
selflayer = self.selflayers_0
if layer == 1:
nb_layer = self.nblayers_1
selflayer = self.selflayers_1
input1 = nb_layer(input1)
input2 = selflayer(input2)
input10 = torch.concat([input1, input2], axis=1)
if layer == 0:
weight10 = self.attentions_0(input10)
if layer == 1:
weight10 = self.attentions_1(input10)
return weight10
def hard_concrete_sample(self, log_alpha, beta=1.0, training=True):
gamma = args.gamma
zeta = args.zeta
if training:
debug_var = 1e-7
bias = 0.0
np_random = np.random.uniform(low=debug_var, high=1.0-debug_var, size=np.shape(log_alpha.cpu().detach().numpy()))
random_noise = bias + torch.tensor(np_random)
gate_inputs = torch.log(random_noise) - torch.log(1.0 - random_noise)
gate_inputs = (gate_inputs.cuda() + log_alpha) / beta
gate_inputs = torch.sigmoid(gate_inputs)
else:
gate_inputs = torch.sigmoid(log_alpha)
stretched_values = gate_inputs * (zeta-gamma) +gamma
cliped = torch.clamp(stretched_values, 0.0, 1.0)
return cliped.float()
def generate(self, x, layer=0):
f1_features = x[self.row, :]
f2_features = x[self.col, :]
weight = self.get_attention(f1_features, f2_features, layer)
mask = self.hard_concrete_sample(weight, training=False)
mask = torch.squeeze(mask)
adj = torch.sparse.FloatTensor(self.adj_mat._indices(), mask, self.adj_mat.shape)
ind = deepcopy(adj._indices())
row = ind[0, :]
col = ind[1, :]
rowsum = torch.sparse.sum(adj, dim=-1).to_dense()
d_inv_sqrt = torch.reshape(torch.pow(rowsum, -0.5), [-1])
d_inv_sqrt = torch.clamp(d_inv_sqrt, 0.0, 10.0)
row_inv_sqrt = d_inv_sqrt[row]
col_inv_sqrt = d_inv_sqrt[col]
values = torch.mul(adj._values(), row_inv_sqrt)
values = torch.mul(values, col_inv_sqrt)
support = torch.sparse.FloatTensor(adj._indices(), values, adj.shape)
return support
def l0_norm(self, log_alpha, beta):
gamma = args.gamma
zeta = args.zeta
gamma = torch.tensor(gamma)
zeta = torch.tensor(zeta)
reg_per_weight = torch.sigmoid(log_alpha - beta * torch.log(-gamma/zeta))
return torch.mean(reg_per_weight)
def set_fea_adj(self, nodes, adj):
self.node_size = nodes
self.adj_mat = adj
ind = deepcopy(adj._indices())
self.row = ind[0, :]
self.col = ind[1, :]
def call(self, inputs, training=None):
if training:
temperature = inputs
else:
temperature = 1.0
self.maskes = []
x = self.features.detach()
layer_index = 0
embedsLst = [self.features.detach()]
for layer in self.gcnLayers:
xs = []
f1_features = x[self.row, :]
f2_features = x[self.col, :]
weight = self.get_attention(f1_features, f2_features, layer=layer_index)
mask = self.hard_concrete_sample(weight, temperature, training)
self.edge_weights.append(weight)
self.maskes.append(mask)
mask = torch.squeeze(mask)
adj = torch.sparse.FloatTensor(self.adj_mat._indices(), mask, self.adj_mat.shape).coalesce()
ind = deepcopy(adj._indices())
row = ind[0, :]
col = ind[1, :]
rowsum = torch.sparse.sum(adj, dim=-1).to_dense() + 1e-6
d_inv_sqrt = torch.reshape(torch.pow(rowsum, -0.5), [-1])
d_inv_sqrt = torch.clamp(d_inv_sqrt, 0.0, 10.0)
row_inv_sqrt = d_inv_sqrt[row]
col_inv_sqrt = d_inv_sqrt[col]
values = torch.mul(adj.values(), row_inv_sqrt)
values = torch.mul(values, col_inv_sqrt)
support = torch.sparse.FloatTensor(adj._indices(), values, adj.shape).coalesce()
nextx = layer(support, x, False)
xs.append(nextx)
x = xs[0]
embedsLst.append(x)
layer_index += 1
return sum(embedsLst)
def lossl0(self, temperature):
l0_loss = torch.zeros([]).cuda()
for weight in self.edge_weights:
l0_loss += self.l0_norm(weight, temperature)
self.edge_weights = []
return l0_loss
def forward(self, users, items, neg_items, temperature):
self.freeze(self.gcnLayers)
x = self.call(temperature, True)
x_user, x_item = torch.split(x, [args.user, args.item], dim=0)
ancEmbeds = x_user[users]
posEmbeds = x_item[items]
negEmbeds = x_item[neg_items]
scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch
regLoss = calcRegLoss(self) * args.reg
lossl0 = self.lossl0(temperature) * args.lambda0
return bprLoss + regLoss + lossl0
| 11,377 | 29.180371 | 126 | py |
AdaptiveGCL | AdaptiveGCL-main/Utils/TimeLogger.py | import datetime
logmsg = ''
timemark = dict()
saveDefault = False
def log(msg, save=None, oneline=False):
global logmsg
global saveDefault
time = datetime.datetime.now()
tem = '%s: %s' % (time, msg)
if save != None:
if save:
logmsg += tem + '\n'
elif saveDefault:
logmsg += tem + '\n'
if oneline:
print(tem, end='\r')
else:
print(tem)
def marktime(marker):
global timemark
timemark[marker] = datetime.datetime.now()
if __name__ == '__main__':
log('') | 476 | 16.666667 | 43 | py |
AdaptiveGCL | AdaptiveGCL-main/Utils/Utils.py | import torch as t
import torch.nn.functional as F
def innerProduct(usrEmbeds, itmEmbeds):
    """Row-wise dot product between user and item embedding batches."""
    elementwise = usrEmbeds * itmEmbeds
    return elementwise.sum(dim=-1)


def pairPredict(ancEmbeds, posEmbeds, negEmbeds):
    """BPR score difference: score(anchor, pos) - score(anchor, neg)."""
    pos_scores = innerProduct(ancEmbeds, posEmbeds)
    neg_scores = innerProduct(ancEmbeds, negEmbeds)
    return pos_scores - neg_scores
def calcRegLoss(model):
    """Sum of squared L2 norms of all model parameters (weight-decay term)."""
    total = 0
    for param in model.parameters():
        total = total + param.norm(2).square()
    return total
def contrastLoss(embeds1, embeds2, nodes, temp):
    """InfoNCE-style contrastive loss between two embedding views.

    Both views are L2-normalized; for each picked node, the numerator is the
    (temperature-scaled) similarity with its counterpart in the other view,
    the denominator sums similarities with every row of the other view.
    Returns a per-node loss tensor (not reduced).
    """
    view1 = F.normalize(embeds1, p=2)
    view2 = F.normalize(embeds2, p=2)
    anchor = view1[nodes]
    positive = view2[nodes]
    pos_score = t.exp((anchor * positive).sum(dim=-1) / temp)
    all_scores = t.exp(anchor @ view2.T / temp).sum(-1)
    return -t.log(pos_score / all_scores)
wcep-mds-dataset | wcep-mds-dataset-master/experiments/summarizer.py | import utils
from nltk import word_tokenize, bigrams
from sent_splitter import SentenceSplitter
from data import Sentence, Article
class Summarizer:
    """Base class for extractive multi-document summarizers.

    Subclasses implement :meth:`summarize`; this base provides the shared
    helpers: preprocessing raw article dicts into ``Article``/``Sentence``
    objects, exact-text deduplication, sentence-length measurement, and a
    bigram-overlap redundancy check.

    Fix vs the original: the ``summarize`` defaults were swapped
    (``min_sent_tokens=60, max_sent_tokens=7``, an impossible range); they
    now match the ``min_sent_tokens=7, max_sent_tokens=40`` convention used
    by every concrete summarizer in this package. The method only raises
    ``NotImplementedError``, so no caller behavior changes.
    """

    def _deduplicate(self, sents):
        """Drop sentences whose exact text was already seen, keeping order."""
        seen = set()
        uniq_sents = []
        for s in sents:
            if s.text not in seen:
                seen.add(s.text)
                uniq_sents.append(s)
        return uniq_sents

    def _sent_len(self, sent, len_type):
        """Length of a sentence in characters, words, or as one sentence unit."""
        if len_type == 'chars':
            return len(sent.text)
        elif len_type == 'words':
            return len(sent.words)
        elif len_type == 'sents':
            return 1
        else:
            raise ValueError('len_type must be in (chars|words|sents)')

    def _is_redundant(self, sents, selected, new, max_redundancy):
        """Return True if the candidate sentence overlaps too much.

        Overlap is the fraction of the candidate's bigrams that also occur
        in any already-selected sentence; at or above ``max_redundancy``
        the candidate is considered redundant.
        """
        new_bigrams = list(bigrams(sents[new].words))
        l = len(new_bigrams)
        for i in selected:
            old_bigrams = list(bigrams(sents[i].words))
            n_matching = len([x for x in new_bigrams if x in old_bigrams])
            if n_matching == 0:
                continue
            else:
                overlap = n_matching / l
                if overlap >= max_redundancy:
                    return True
        return False

    def _preprocess(self, articles):
        """Convert raw article dicts into ``Article`` objects.

        The title becomes a ``Sentence`` with position -1 and
        ``is_title=True``; body sentences are split and tokenized with
        their 0-based position.
        """
        sent_splitter = SentenceSplitter()
        processed_articles = []
        for a in articles:
            body_sents = sent_splitter.split_sents(a['text'])
            processed_title = Sentence(
                text=a['title'],
                words=word_tokenize(a['title']),
                position=-1,
                is_title=True
            )
            processed_sents = []
            for position, s in enumerate(body_sents):
                processed_sent = Sentence(
                    text=s,
                    words=word_tokenize(s),
                    position=position
                )
                processed_sents.append(processed_sent)
            processed_article = Article(processed_title, processed_sents)
            processed_articles.append(processed_article)
        return processed_articles

    def _preprocess_sents(self, raw_sents):
        """Tokenize bare sentence strings into ``Sentence`` objects."""
        processed_sents = []
        for s in raw_sents:
            processed_sent = Sentence(
                text=s,
                words=word_tokenize(s),
                position=None
            )
            processed_sents.append(processed_sent)
        return processed_sents

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        """Produce an extractive summary; implemented by subclasses."""
        raise NotImplementedError
| 2,737 | 30.471264 | 74 | py |
wcep-mds-dataset | wcep-mds-dataset-master/experiments/baselines.py | import utils
import random
import collections
import numpy as np
import networkx as nx
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import MiniBatchKMeans
from summarizer import Summarizer
warnings.filterwarnings('ignore', category=RuntimeWarning)
random.seed(24)
class RandomBaseline(Summarizer):
def summarize(self,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
articles = self._preprocess(articles)
sents = [s for a in articles for s in a.sents]
if in_titles == False or out_titles == False:
sents = [s for s in sents if not s.is_title]
sents = self._deduplicate(sents)
sent_lens = [self._sent_len(s, len_type) for s in sents]
current_len = 0
remaining = list(range(len(sents)))
random.shuffle(remaining)
selected = []
for i in remaining:
new_len = current_len + sent_lens[i]
if new_len <= max_len:
if not (min_sent_tokens <= len(
sents[i].words) <= max_sent_tokens):
continue
selected.append(i)
current_len = new_len
if current_len >= max_len:
break
summary_sents = [sents[i].text for i in selected]
return ' '.join(summary_sents)
class RandomLead(Summarizer):
def summarize(self,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
article_idxs = list(range(len(articles)))
random.shuffle(article_idxs)
summary = ''
for i in article_idxs:
a = articles[i]
a = self._preprocess([a])[0]
sents = a.sents
if in_titles == False or out_titles == False:
sents = [s for s in sents if not s.is_title]
current_len = 0
selected_sents = []
for s in sents:
l = self._sent_len(s, len_type)
new_len = current_len + l
if new_len <= max_len:
if not (min_sent_tokens <= len(s.words) <= max_sent_tokens):
continue
selected_sents.append(s.text)
current_len = new_len
if new_len > max_len:
break
if len(selected_sents) >= 1:
summary = ' '.join(selected_sents)
break
return summary
class TextRankSummarizer(Summarizer):
def __init__(self, max_redundancy=0.5):
self.max_redundancy = max_redundancy
def _compute_page_rank(self, S):
nodes = list(range(S.shape[0]))
graph = nx.from_numpy_matrix(S)
pagerank = nx.pagerank(graph, weight='weight')
scores = [pagerank[i] for i in nodes]
return scores
def summarize(self,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
articles = self._preprocess(articles)
sents = [s for a in articles for s in a.sents]
if in_titles == False:
sents = [s for s in sents if not s.is_title]
sents = self._deduplicate(sents)
sent_lens = [self._sent_len(s, len_type) for s in sents]
raw_sents = [s.text for s in sents]
vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
X = vectorizer.fit_transform(raw_sents)
S = cosine_similarity(X)
scores = self._compute_page_rank(S)
scored = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
if not out_titles:
scored = [(i, score) for (i, score) in scored
if not sents[i].is_title]
current_len = 0
selected = []
for i, _ in scored:
new_len = current_len + sent_lens[i]
if new_len <= max_len:
if self._is_redundant(
sents, selected, i, self.max_redundancy):
continue
if not (min_sent_tokens <= len(
sents[i].words) <= max_sent_tokens):
continue
selected.append(i)
current_len = new_len
summary_sents = [sents[i].text for i in selected]
return ' '.join(summary_sents)
class CentroidSummarizer(Summarizer):
    """Greedy extractive summarizer ranking sentences by cosine similarity
    to the TF-IDF centroid of all input sentences.

    Fix vs the original: the bare ``except:`` around ``fit_transform`` is
    narrowed to ``ValueError`` (what ``TfidfVectorizer`` raises when the
    input is empty or produces an empty vocabulary), so unrelated errors
    and ``KeyboardInterrupt`` are no longer silently swallowed.
    """

    def __init__(self, max_redundancy=0.5):
        # maximum allowed bigram overlap with already-selected sentences
        self.max_redundancy = max_redundancy

    def summarize(self,
                  articles,
                  max_len=40,
                  len_type='words',
                  in_titles=False,
                  out_titles=False,
                  min_sent_tokens=7,
                  max_sent_tokens=40):
        """Select sentences near the TF-IDF centroid up to ``max_len``."""
        articles = self._preprocess(articles)
        sents = [s for a in articles for s in a.sents]
        if in_titles == False:
            sents = [s for s in sents if not s.is_title]
        sents = self._deduplicate(sents)
        sent_lens = [self._sent_len(s, len_type) for s in sents]
        raw_sents = [s.text for s in sents]
        vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
        try:
            X = vectorizer.fit_transform(raw_sents)
        except ValueError:
            # empty input / empty vocabulary -> nothing to summarize
            return ''
        centroid = X.mean(0)
        scores = cosine_similarity(X, centroid)
        scored = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
        if not out_titles:
            scored = [(i, score) for (i, score) in scored
                      if not sents[i].is_title]
        current_len = 0
        selected = []
        for i, _ in scored:
            new_len = current_len + sent_lens[i]
            if new_len <= max_len:
                # skip near-duplicates of already selected sentences
                if self._is_redundant(
                        sents, selected, i, self.max_redundancy):
                    continue
                if not (min_sent_tokens <= len(
                        sents[i].words) <= max_sent_tokens):
                    continue
                selected.append(i)
                current_len = new_len
        summary_sents = [sents[i].text for i in selected]
        return ' '.join(summary_sents)
class SubmodularSummarizer(Summarizer):
"""
Selects a combination of sentences as a summary by greedily optimizing
a submodular function, in this case two functions representing
coverage and diversity of the sentence combination.
"""
def __init__(self, a=5, div_weight=6, cluster_factor=0.2):
self.a = a
self.div_weight = div_weight
self.cluster_factor = cluster_factor
def cluster_sentences(self, X):
n = X.shape[0]
n_clusters = round(self.cluster_factor * n)
if n_clusters <= 1 or n <= 2:
return dict((i, 1) for i in range(n))
clusterer = MiniBatchKMeans(
n_clusters=n_clusters,
init_size=3 * n_clusters
)
labels = clusterer.fit_predict(X)
i_to_label = dict((i, l) for i, l in enumerate(labels))
return i_to_label
def compute_summary_coverage(self,
alpha,
summary_indices,
sent_coverages,
pairwise_sims):
cov = 0
for i, i_generic_cov in enumerate(sent_coverages):
i_summary_cov = sum([pairwise_sims[i, j] for j in summary_indices])
i_cov = min(i_summary_cov, alpha * i_generic_cov)
cov += i_cov
return cov
def compute_summary_diversity(self,
summary_indices,
ix_to_label,
avg_sent_sims):
cluster_to_ixs = collections.defaultdict(list)
for i in summary_indices:
l = ix_to_label[i]
cluster_to_ixs[l].append(i)
div = 0
for l, l_indices in cluster_to_ixs.items():
cluster_score = sum([avg_sent_sims[i] for i in l_indices])
cluster_score = np.sqrt(cluster_score)
div += cluster_score
return div
def optimize(self,
sents,
max_len,
len_type,
ix_to_label,
pairwise_sims,
sent_coverages,
avg_sent_sims,
out_titles,
min_sent_tokens,
max_sent_tokens):
alpha = self.a / len(sents)
sent_lens = [self._sent_len(s, len_type) for s in sents]
current_len = 0
remaining = set(range(len(sents)))
for i, s in enumerate(sents):
bad_length = not (min_sent_tokens <= len(sents[i].words)
<= max_sent_tokens)
if bad_length:
remaining.remove(i)
elif out_titles == False and s.is_title:
remaining.remove(i)
selected = []
scored_selections = []
while current_len < max_len and len(remaining) > 0:
scored = []
for i in remaining:
new_len = current_len + sent_lens[i]
if new_len <= max_len:
summary_indices = selected + [i]
cov = self.compute_summary_coverage(
alpha, summary_indices, sent_coverages, pairwise_sims)
div = self.compute_summary_diversity(
summary_indices, ix_to_label, avg_sent_sims)
score = cov + self.div_weight * div
scored.append((i, score))
if len(scored) == 0:
break
scored.sort(key=lambda x: x[1], reverse=True)
best_idx, best_score = scored[0]
scored_selections.append((selected + [best_idx], best_score))
current_len += sent_lens[best_idx]
selected.append(best_idx)
remaining.remove(best_idx)
scored_selections.sort(key=lambda x: x[1], reverse=True)
best_selection = scored_selections[0][0]
return best_selection
def summarize(self,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
articles = self._preprocess(articles)
sents = [s for a in articles for s in a.sents]
if in_titles == False:
sents = [s for s in sents if not s.is_title]
sents = self._deduplicate(sents)
raw_sents = [s.text for s in sents]
vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
X = vectorizer.fit_transform(raw_sents)
ix_to_label = self.cluster_sentences(X)
pairwise_sims = cosine_similarity(X)
sent_coverages = pairwise_sims.sum(0)
avg_sent_sims = sent_coverages / len(sents)
selected = self.optimize(
sents, max_len, len_type, ix_to_label,
pairwise_sims, sent_coverages, avg_sent_sims,
out_titles, min_sent_tokens, max_sent_tokens
)
summary = [sents[i].text for i in selected]
return ' '.join(summary)
| 11,786 | 33.364431 | 80 | py |
wcep-mds-dataset | wcep-mds-dataset-master/experiments/evaluate.py | import argparse
import collections
import numpy as np
import utils
from newsroom.analyze.rouge import ROUGE_L, ROUGE_N
def print_mean(results, rouge_types):
    """Print the mean p/r/f (rounded to 3 decimals) for each ROUGE variant."""
    for rouge_type in rouge_types:
        scores = results[rouge_type]
        means = {m: round(np.mean(scores[m]), 3) for m in ('p', 'r', 'f')}
        print(rouge_type, 'p:', means['p'], 'r:', means['r'], 'f:', means['f'])
def evaluate(ref_summaries, pred_summaries, lowercase=False):
    """Score predicted summaries against references with ROUGE-1/2/L.

    Args:
        ref_summaries: reference summary strings.
        pred_summaries: predicted summary strings, aligned with the refs.
        lowercase: lowercase both sides before scoring when True.

    Returns:
        Dict mapping each of 'rouge-1'/'rouge-2'/'rouge-l' to a dict of
        mean 'p'/'r'/'f' values rounded to 3 decimals.
    """
    rouge_types = ['rouge-1', 'rouge-2', 'rouge-l']
    # per-metric accumulators of per-pair scores
    results = dict((rouge_type, collections.defaultdict(list))
                   for rouge_type in rouge_types)
    for ref, pred in zip(ref_summaries, pred_summaries):
        if lowercase:
            pred = pred.lower()
            ref = ref.lower()
        r1 = ROUGE_N(ref, pred, n=1)
        r2 = ROUGE_N(ref, pred, n=2)
        rl = ROUGE_L(ref, pred)
        for (rouge_type, scores) in zip(rouge_types, [r1, r2, rl]):
            results[rouge_type]['p'].append(scores.precision)
            results[rouge_type]['r'].append(scores.recall)
            results[rouge_type]['f'].append(scores.fscore)
    # collapse per-pair lists into rounded means
    mean_results = {}
    for rouge_type in rouge_types:
        precs = results[rouge_type]['p']
        recalls = results[rouge_type]['r']
        fscores = results[rouge_type]['f']
        mean_results[rouge_type] = {
            'p': round(np.mean(precs), 3),
            'r': round(np.mean(recalls), 3),
            'f': round(np.mean(fscores), 3)
        }
    return mean_results
def evaluate_from_path(dataset_path, pred_path, start, stop, lowercase=False):
dataset = utils.read_jsonl(dataset_path)
predictions = utils.read_jsonl(pred_path)
rouge_types = ['rouge-1', 'rouge-2', 'rouge-l']
results = dict((rouge_type, collections.defaultdict(list))
for rouge_type in rouge_types)
for i, cluster in enumerate(dataset):
if start > -1 and i < start:
continue
if stop > -1 and i >= stop:
break
prediction = next(predictions)
assert prediction['cluster_id'] == cluster['id']
hyp = prediction['summary']
ref = cluster['summary']
if lowercase:
hyp = hyp.lower()
ref = ref.lower()
r1 = ROUGE_N(ref, hyp, n=1)
r2 = ROUGE_N(ref, hyp, n=2)
rl = ROUGE_L(ref, hyp)
for (rouge_type, scores) in zip(rouge_types, [r1, r2, rl]):
results[rouge_type]['p'].append(scores.precision)
results[rouge_type]['r'].append(scores.recall)
results[rouge_type]['f'].append(scores.fscore)
if i % 100 == 0:
print(i)
# print_mean(results, rouge_types)
print('Final Average:')
print_mean(results, rouge_types)
return results
def main(args):
    """CLI entry point: score predictions from disk and save mean results.

    Bug fix: this previously called ``evaluate`` — which expects two lists
    of summary strings plus ``lowercase`` — with five path/CLI arguments,
    raising a TypeError. ``evaluate_from_path`` is the function whose
    signature matches this argument list.
    """
    results = evaluate_from_path(args.dataset, args.preds, args.start,
                                 args.stop, args.lowercase)
    utils.write_json(results, args.o)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset')
parser.add_argument('--preds')
parser.add_argument('--o')
parser.add_argument('--start', type=int, default=-1)
parser.add_argument('--stop', type=int, default=-1)
parser.add_argument('--lowercase', action='store_true')
main(parser.parse_args())
| 3,425 | 29.589286 | 78 | py |
wcep-mds-dataset | wcep-mds-dataset-master/experiments/utils.py | import json
import gzip
import pickle
def read_lines(path):
with open(path) as f:
for line in f:
yield line
def read_json(path):
with open(path) as f:
object = json.loads(f.read())
return object
def write_json(object, path):
with open(path, 'w') as f:
f.write(json.dumps(object))
def read_jsonl(path, load=False, start=0, stop=None):
    """Read a JSON-lines file, optionally slicing by 0-based line index.

    Args:
        path: file to read.
        load: if True, return a fully materialized list, else a generator.
        start: first line index to yield (inclusive).
        stop: line index at which to stop (exclusive), or None for all.
    """
    def _generate(p):
        with open(p) as handle:
            for idx, raw in enumerate(handle):
                if stop is not None and idx >= stop:
                    break
                if idx < start:
                    continue
                yield json.loads(raw)
    records = _generate(path)
    return list(records) if load else records
def read_jsonl_gz(path):
with gzip.open(path) as f:
for l in f:
yield json.loads(l)
def write_jsonl(items, path, batch_size=100, override=True):
    """Write items to a JSON-lines file, flushing in batches.

    Args:
        items: iterable of JSON-serializable objects.
        path: output file path.
        batch_size: number of lines buffered before each write.
        override: truncate the file first when True, else append.
    """
    def _flush(lines):
        with open(path, 'a') as sink:
            sink.write('\n'.join(lines) + '\n')

    if override:
        # truncate so subsequent appends start from an empty file
        open(path, 'w').close()
    pending = []
    for count, item in enumerate(items):
        if count > 0 and count % batch_size == 0:
            _flush(pending)
            pending = []
        pending.append(json.dumps(item))
    if pending:
        _flush(pending)
def load_pkl(path):
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
def dump_pkl(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def args_to_summarize_settings(args):
args = vars(args)
settings = {}
for k in ['len_type', 'max_len',
'min_sent_tokens', 'max_sent_tokens',
'in_titles', 'out_titles']:
settings[k] = args[k]
return settings
| 1,835 | 20.6 | 60 | py |
wcep-mds-dataset | wcep-mds-dataset-master/experiments/oracles.py | import argparse
from collections import Counter
from nltk import word_tokenize, ngrams
from summarizer import Summarizer
import utils
def compute_rouge_n(hyp, ref, rouge_n=1, tokenize=True):
    """Compute ROUGE-N precision/recall/F1 via clipped n-gram counts.

    Args:
        hyp: hypothesis string (or token list when ``tokenize`` is False).
        ref: reference string (or token list when ``tokenize`` is False).
        rouge_n: n-gram order; 1 compares unigrams directly.
        tokenize: run nltk ``word_tokenize`` on both inputs first.

    Returns:
        Dict with keys 'p', 'r', 'f'; all zeros when either side is empty.
    """
    hyp_toks = word_tokenize(hyp) if tokenize else hyp
    ref_toks = word_tokenize(ref) if tokenize else ref
    if rouge_n > 1:
        hyp_units = list(ngrams(hyp_toks, n=rouge_n))
        ref_units = list(ngrams(ref_toks, n=rouge_n))
    else:
        hyp_units, ref_units = hyp_toks, ref_toks
    if not hyp_units or not ref_units:
        return {'p': 0., 'r': 0., 'f': 0.}
    hyp_counts = Counter(hyp_units)
    ref_counts = Counter(ref_units)
    # clipped overlap: each hyp n-gram matches at most its ref count
    overlap = sum(min(cnt, ref_counts[unit])
                  for unit, cnt in hyp_counts.items())
    p_denom = sum(hyp_counts.values())
    r_denom = sum(ref_counts.values())
    precision = overlap / p_denom if overlap and p_denom else 0
    recall = overlap / r_denom if overlap and r_denom else 0
    if precision and recall:
        fscore = 2 * precision * recall / (precision + recall)
    else:
        fscore = 0
    return {'p': precision, 'r': recall, 'f': fscore}
class Oracle():
def __init__(self, rouge_n=1, metric='f', early_stopping=True):
self.rouge_n = rouge_n
self.metric = metric
self.early_stopping = early_stopping
self.summarizer = Summarizer()
def summarize(self,
ref,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
articles = self.summarizer._preprocess(articles)
sents = [s for a in articles for s in a.sents]
sents = self.summarizer._deduplicate(sents)
if in_titles == False or out_titles == False:
sents = [s for s in sents if not s.is_title]
sent_lens = [self.summarizer._sent_len(s, len_type) for s in sents]
current_len = 0
remaining = list(range(len(sents)))
selected = []
scored_selections = []
ref_words = word_tokenize(ref)
while current_len < max_len and len(remaining) > 0:
scored = []
current_summary_words = [
tok for i in selected for tok in sents[i].words
]
for i in remaining:
new_len = current_len + sent_lens[i]
if new_len <= max_len:
try:
summary_words = current_summary_words + sents[i].words
rouge_scores = compute_rouge_n(
summary_words,
ref_words,
rouge_n=self.rouge_n,
tokenize=False
)
score = rouge_scores[self.metric]
scored.append((i, score))
except:
pass
if len(scored) == 0:
break
scored.sort(key=lambda x: x[1], reverse=True)
best_idx, best_score = scored[0]
scored_selections.append((selected + [best_idx], best_score))
current_len += sent_lens[best_idx]
selected.append(scored[0][0])
remaining.remove(best_idx)
if self.early_stopping == False:
# remove shorter summaries
max_sents = max([len(x[0]) for x in scored_selections])
scored_selections = [x for x in scored_selections
if len(x[0]) < max_sents]
scored_selections.sort(key=lambda x: x[1], reverse=True)
if len(scored_selections) == 0:
return ''
best_selection = scored_selections[0][0]
summary_sents = [sents[i].text for i in best_selection]
return ' '.join(summary_sents)
class SingleOracle():
def __init__(self, rouge_n=1, metric='f', early_stopping=True):
self.rouge_n = rouge_n
self.metric = metric
self.oracle = Oracle(rouge_n, metric, early_stopping)
def summarize(self,
ref,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
scored_oracles = []
for a in articles:
summary = self.oracle.summarize(
ref, [a], max_len, len_type, in_titles, out_titles,
min_sent_tokens, max_sent_tokens
)
rouge_scores = compute_rouge_n(
summary,
ref,
rouge_n=self.rouge_n,
tokenize=True
)
score = rouge_scores[self.metric]
scored_oracles.append((summary, score))
scored_oracles.sort(key=lambda x: x[1], reverse=True)
return scored_oracles[0][0]
class LeadOracle():
def __init__(self, rouge_n=1, metric='f'):
self.rouge_n = rouge_n
self.metric = metric
self.summarizer = Summarizer()
def summarize(self,
ref,
articles,
max_len=40,
len_type='words',
in_titles=False,
out_titles=False,
min_sent_tokens=7,
max_sent_tokens=40):
articles = self.summarizer._preprocess(articles)
scored_summaries = []
for a in articles:
selected_sents = []
current_len = 0
sents = a.sents
if in_titles == False or out_titles == False:
sents = [s for s in sents if not s.is_title]
for s in sents:
l = self.summarizer._sent_len(s, len_type)
new_len = current_len + l
if new_len <= max_len:
selected_sents.append(s.text)
current_len = new_len
if new_len > max_len:
break
if len(selected_sents) >= 1:
summary = ' '.join(selected_sents)
rouge_scores = compute_rouge_n(
summary,
ref,
self.rouge_n,
tokenize=True
)
score = rouge_scores[self.metric]
scored_summaries.append((summary, score))
scored_summaries.sort(key=lambda x: x[1], reverse=True)
summary = scored_summaries[0][0]
return summary
def main(args):
if args.mode == 'predict-lead-oracle':
summarizer = LeadOracle(
rouge_n=args.rouge_n,
metric=args.metric
)
elif args.mode == 'predict-oracle':
summarizer = Oracle(
rouge_n=args.rouge_n,
metric=args.metric
)
elif args.mode == 'predict-oracle-single':
summarizer = SingleOracle(
rouge_n=args.rouge_n,
metric=args.metric
)
else:
raise ValueError('Unknown or unspecified --mode: ' + args.mode)
summarize_settings = utils.args_to_summarize_settings(args)
Summarizer.summarize_dataset(
summarizer,
dataset_path=args.dataset,
pred_path=args.preds,
summarize_settings=summarize_settings,
start=args.start,
stop=args.stop,
batchsize=args.batchsize,
jobs=args.jobs,
oracle=True
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode')
parser.add_argument('--dataset')
parser.add_argument('--preds')
parser.add_argument('--start', type=int, default=-1)
parser.add_argument('--stop', type=int, default=-1)
parser.add_argument('--max-len', type=int, default=40)
parser.add_argument('--len-type', default='words')
parser.add_argument('--in-titles', action='store_true')
parser.add_argument('--out-titles', action='store_true')
# min/max sent tokens have no effect for oracles
parser.add_argument('--min-sent-tokens', type=int, default=7)
parser.add_argument('--max-sent-tokens', type=int, default=60)
parser.add_argument('--rouge-n', type=int, default=1)
parser.add_argument('--metric', default='f')
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--jobs', type=int, default=4)
parser.add_argument('--early-stopping', action='store_true')
main(parser.parse_args())
| 8,710 | 33.027344 | 78 | py |
wcep-mds-dataset | wcep-mds-dataset-master/experiments/data.py | import string
from spacy.lang.en import STOP_WORDS
STOP_WORDS |= set(string.punctuation)
class Article:
    """A news article: a title sentence plus a list of body sentences."""

    def __init__(self, title, sents):
        # title: a Sentence (or None); sents: list of Sentence objects
        self.title = title
        self.sents = sents

    def words(self):
        """Flatten all tokens, including the title's tokens when present."""
        sources = self.sents if self.title is None else [self.title] + self.sents
        flat = []
        for sentence in sources:
            flat.extend(sentence.words)
        return flat
class Sentence:
    """A tokenized sentence with its position inside the source article."""

    def __init__(self, text, words, position, is_title=False):
        self.text = text
        self.words = words
        self.position = position
        self.is_title = is_title
        # tokens that carry content, i.e. not stopwords or punctuation
        self.content_words = [tok for tok in words if tok not in STOP_WORDS]

    def __len__(self):
        return len(self.words)
wcep-mds-dataset | wcep-mds-dataset-master/experiments/sent_splitter.py | import re
from nltk import sent_tokenize
class SentenceSplitter:
    """NLTK ``sent_tokenize`` plus fixes for common news-article artifacts."""

    def unglue(self, match):
        """Regex callback: insert a space between the period and the letter."""
        glued = match.group(0)
        return '{} {}'.format(glued[0], glued[1])

    def fix_glued_sents(self, text):
        """Separate sentences fused like ``end.Next`` into ``end. Next``."""
        return re.sub(r'\.[A-Z]', self.unglue, text)

    def fix_line_broken_sents(self, sents):
        """Split any tokenized sentence that still spans multiple lines."""
        flattened = []
        for sent in sents:
            flattened.extend(part.strip() for part in sent.split('\n'))
        return flattened

    def split_sents(self, text):
        """Full pipeline: deglue, tokenize, split on newlines, drop empties."""
        deglued = self.fix_glued_sents(text)
        pieces = self.fix_line_broken_sents(sent_tokenize(deglued))
        return [s for s in pieces if s != '']
| 766 | 25.448276 | 71 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_reproduction/extract_cc_articles.py | import argparse
import pathlib
import logging
import json
import subprocess
import multiprocessing
import newspaper
import sys
import time
import utils
from warcio.archiveiterator import ArchiveIterator
def read_warc_gz(path):
with open(path, 'rb') as f:
for record in ArchiveIterator(f):
# records are queries followed by response, we only need response
if record.content_type == 'application/http; msgtype=response':
yield record
def get_record_id(record):
    """Extract the bare UUID from a WARC record's WARC-Record-ID header.

    The header value looks like ``<urn:uuid:xxxx-...>``; only the uuid
    portion between ``uuid:`` and ``>`` is returned.
    """
    raw = record.rec_headers.get_header('WARC-Record-ID')
    uuid_part = raw.split('uuid:')[1]
    return uuid_part.split('>')[0]


def get_record_url(record):
    """Return the target URL of a WARC record."""
    return record.rec_headers.get_header('WARC-Target-URI')
def download_cc_file(cc_path, local_cc_path):
aws_path = f's3://commoncrawl/{cc_path}'
cmd = f'aws s3 cp {aws_path} {local_cc_path} --no-sign-request'
logging.debug(cmd)
cmd = cmd.split()
while not local_cc_path.exists():
p = subprocess.Popen(cmd)
try:
p.wait()
except KeyboardInterrupt:
p.terminate()
if local_cc_path.exists():
break
logging.info(f'file download failed, retrying: {cc_path}')
time.sleep(5)
def read_article_ids(path, max_cluster_size):
id_to_collection = {}
ids = set()
for cluster in utils.read_jsonl(path):
articles = cluster['cc_articles']
if max_cluster_size != -1:
l = max_cluster_size - len(cluster['wcep_articles'])
articles = articles[:l]
for a in articles:
ids.add(a['id'])
id_to_collection[a['id']] = cluster['collection']
return ids, id_to_collection
def extract_article(item):
html = item['html']
extracted = newspaper.Article(item['url'])
try:
extracted.download(input_html=html)
extracted.parse()
if extracted.publish_date is None:
time = None
else:
time = extracted.publish_date.isoformat()
article = {
'id': item['id'],
'cc_file': item['cc_file'],
'time': time,
'title': extracted.title,
'text': extracted.text,
'url': item['url'],
'collection': item['collection'],
}
except Exception as e:
logging.error(f'record-id: {item["id"]}, error:{e}')
article = None
return article
def process_batch(items, out_path, jobs):
logging.debug('extracting articles...')
pool = multiprocessing.Pool(processes=jobs)
try:
articles = pool.map(extract_article, items)
articles = [a for a in articles if a is not None]
pool.close()
logging.debug('extracting articles done')
except KeyboardInterrupt:
pool.terminate()
sys.exit()
utils.write_jsonl(articles, out_path, mode='a')
new_record_ids = [x['id'] for x in items]
logging.info(f'done-record-ids:{" ".join(new_record_ids)}')
return articles
def parse_logged_record_ids(line):
    """Parse the set of record ids from a ``done-cc-ids:`` log line."""
    tail = line.split('done-cc-ids:')[1]
    return set(tail.split())


def parse_logged_cc_file(line):
    """Parse the CommonCrawl file path from a ``done-cc-file:`` log line."""
    tail = line.split('done-cc-file:')[1]
    return tail.strip()
def read_log(path):
    """Recover progress from a previous run's log file.

    Returns:
        (done_cc_files, done_record_ids): the set of fully processed
        CommonCrawl files and the set of already-extracted record ids.
    """
    done_cc_files = set()
    done_record_ids = set()
    with open(path) as handle:
        for line in handle:
            if 'done-cc-file' in line:
                done_cc_files.add(parse_logged_cc_file(line))
            elif 'done-cc-ids' in line:
                done_record_ids.update(parse_logged_record_ids(line))
    return done_cc_files, done_record_ids
def mute_other_loggers():
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.getLogger('newspaper').setLevel(logging.WARNING)
logging.getLogger('chardet.charsetprober').setLevel(logging.WARNING)
def main(args):
storage = pathlib.Path(args.storage)
logpath = storage / 'log.txt'
cc_files_path = storage / 'cc_files.txt'
out_path = storage / 'cc_articles.jsonl'
if not storage.exists():
storage.mkdir()
if args.override and out_path.exists():
out_path.unlink()
if args.override and logpath.exists():
logpath.unlink()
logging.basicConfig(
level=logging.DEBUG,
filename=logpath,
filemode=('w' if args.override else 'a'),
format='%(asctime)s %(levelname)-8s %(message)s'
)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
mute_other_loggers()
if logpath.exists():
done_cc_files, done_record_ids = read_log(logpath)
else:
done_cc_files, done_record_ids = set(), set()
cc_files = list(utils.read_lines(cc_files_path))
todo_record_ids, id_to_collection = read_article_ids(
args.dataset, args.max_cluster_size)
n_files = len(cc_files)
for i, cc_file in enumerate(cc_files):
if cc_file in done_cc_files:
continue
logging.debug(f'file {i+1}/{n_files}')
local_cc_path = storage / cc_file.split('/')[-1]
if not local_cc_path.exists():
download_cc_file(cc_file, local_cc_path)
batch = []
n_found_articles = 0
for i, record in enumerate(read_warc_gz(local_cc_path)):
if i % 10000 == 0:
logging.debug(
f'{i} records checked, {n_found_articles} articles found')
id = get_record_id(record)
if id in todo_record_ids:
n_found_articles += 1
item = {
'id': id,
'html': record.content_stream().read(),
'url': get_record_url(record),
'collection': id_to_collection[id],
'cc_file': cc_file
}
batch.append(item)
if len(batch) >= args.batchsize:
process_batch(batch, out_path, args.jobs)
batch = []
if batch:
process_batch(batch, out_path, args.jobs)
logging.info(f'done-cc-file:{cc_file}')
local_cc_path.unlink()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True)
parser.add_argument('--storage', required=True)
parser.add_argument('--override', action='store_true')
parser.add_argument('--max-cluster-size', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=1000)
parser.add_argument('--jobs', type=int, default=4)
main(parser.parse_args())
| 6,569 | 29.137615 | 78 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_reproduction/combine_and_split.py | import argparse
import json
import pathlib
import shutil
import utils
from collections import defaultdict
def get_article_to_cluster_mappings(clusters):
    """Build reverse lookups from article identifiers to cluster indices.

    Returns:
        (url_to_cluster_idxs, id_to_cluster_idx): the first maps a WCEP
        article archive URL to every cluster index containing it; the
        second maps a CommonCrawl article id to its single cluster index.
    """
    url_to_cluster_idxs = defaultdict(list)
    id_to_cluster_idx = {}
    for idx, cluster in enumerate(clusters):
        for article in cluster['wcep_articles']:
            url_to_cluster_idxs[article['archive_url']].append(idx)
        for article in cluster['cc_articles']:
            id_to_cluster_idx[article['id']] = idx
    return url_to_cluster_idxs, id_to_cluster_idx
def add_wcep_articles_to_clusters(wcep_path, url_to_cluster_idxs, clusters):
print('adding articles from WCEP to clusters')
for a in utils.read_jsonl(wcep_path):
for i in url_to_cluster_idxs[a['archive_url']]:
c = clusters[i]
c.setdefault('wcep_articles_filled', [])
c['wcep_articles_filled'].append(a)
def add_cc_articles_to_clusters(clusters, cc_path, id_to_cluster_idx, tmp_clusters_path):
    """Stream CommonCrawl articles into their clusters, spilling finished ones.

    A cluster is "finished" once every expected cc article id has arrived;
    it is then appended to ``tmp_clusters_path`` and its slot in ``clusters``
    is set to None to free memory.

    Bug fixes vs the original:
    * the final sweep over leftover clusters reused the stale ``cluster_idx``
      from the streaming loop, so it cleared the wrong slot and never marked
      the leftover cluster itself as done; it now clears the correct index.
    * removed a leftover ``print("Hmm")`` debug statement.
    * ``i`` is initialized so the final log line cannot raise NameError when
      the article stream is empty.
    """
    print('adding articles from CommonCrawl to clusters')
    n_clusters = len(clusters)
    n_clusters_done = 0
    i = -1  # stays -1 if the cc article stream is empty
    for i, a in enumerate(utils.read_jsonl(cc_path)):
        if i % 10000 == 0:
            print(f'{i} cc articles done, {n_clusters_done}/{n_clusters} clusters done')
        cluster_idx = id_to_cluster_idx[a['id']]
        c = clusters[cluster_idx]
        if c is not None:
            c['cc_articles_filled'].append(a)
            c['cc_ids_filled'].add(a['id'])
            # once every expected cc id arrived, flush the cluster to disk
            if c['cc_ids'] == c['cc_ids_filled']:
                del c['cc_ids'], c['cc_ids_filled']
                utils.write_jsonl([c], tmp_clusters_path, mode='a')
                clusters[cluster_idx] = None
                n_clusters_done += 1
    # remaining few clusters that only have WCEP but not CC articles
    for j, c in enumerate(clusters):
        if c is not None and c['cc_ids'] == c['cc_ids_filled']:
            del c['cc_ids'], c['cc_ids_filled']
            utils.write_jsonl([c], tmp_clusters_path, mode='a')
            clusters[j] = None
            n_clusters_done += 1
    print(f'{i} cc articles done, {n_clusters_done}/{n_clusters} clusters done')
def split_dataset(outdir, tmp_clusters_path):
print('splitting dataset into train/val/test...')
for i, c in enumerate(utils.read_jsonl(tmp_clusters_path)):
if i % 1000 == 0:
print(i, 'clusters done')
outpath = outdir / (c['collection'] + '.jsonl')
utils.write_jsonl([c], outpath, mode='a')
def cleanup_clusters(path, tmp_path):
print('cleaning up:', path.name)
for i, c in enumerate(utils.read_jsonl(path)):
if i % 1000 == 0:
print(i, 'clusters done')
articles = []
if 'wcep_articles_filled' in c:
for a in c['wcep_articles_filled']:
a['origin'] = 'WCEP'
articles.append(a)
if 'cc_articles_filled' in c:
for a in c['cc_articles_filled']:
a['origin'] = 'CommonCrawl'
articles.append(a)
c = {
'id': c['id'],
'date': c['date'],
'summary': c['summary'],
'articles': articles,
'collection': c['collection'],
'wiki_links': c['wiki_links'],
'reference_urls': c['reference_urls'],
'category': c['category']
}
utils.write_jsonl([c], tmp_path, mode='a')
shutil.move(tmp_path, path)
def main(args):
    """Assemble the final dataset: fill clusters with WCEP + CommonCrawl
    articles, split them into train/val/test files, and normalize each file.
    """
    outdir = pathlib.Path(args.o)
    # start from a clean output directory on every run
    if outdir.exists():
        shutil.rmtree(outdir)
    outdir.mkdir()
    tmp_clusters_path = outdir / 'tmp_clusters.jsonl'
    if tmp_clusters_path.exists():
        tmp_clusters_path.unlink()
    # get article -> cluster mappings
    clusters = list(utils.read_jsonl(args.dataset))
    for c in clusters:
        if args.max_cluster_size != -1:
            # cap CC articles so the total cluster size stays within the limit
            # NOTE(review): if max_cluster_size < len(wcep_articles) this slice
            # index is negative and truncates from the end -- TODO confirm intended
            l = args.max_cluster_size - len(c['wcep_articles'])
            c['cc_articles'] = c['cc_articles'][:l]
        # bookkeeping sets used by add_cc_articles_to_clusters to detect
        # when a cluster has received all of its CC articles
        c['cc_ids'] = set([a['id'] for a in c['cc_articles']])
        c['cc_ids_filled'] = set()
        c['cc_articles_filled'] = []
    url_to_cluster_idxs, id_to_cluster_idx = get_article_to_cluster_mappings(
        clusters
    )
    # add articles from WCEP to clusters, using URLs
    add_wcep_articles_to_clusters(
        args.wcep_articles, url_to_cluster_idxs, clusters
    )
    # add articles from CommonCrawl to clusters, using IDs
    add_cc_articles_to_clusters(
        clusters, args.cc_articles, id_to_cluster_idx, tmp_clusters_path
    )
    # split clusters into separate train/val/test files
    split_dataset(outdir, tmp_clusters_path)
    tmp_clusters_path.unlink()
    # tmp_clusters_path is reused as the staging file for each cleanup pass
    for fn in ['train.jsonl', 'val.jsonl', 'test.jsonl']:
        cleanup_clusters(outdir / fn, tmp_clusters_path)
# CLI entry point: paths to the cluster dataset and the two article dumps,
# plus an optional per-cluster size cap (-1 = unlimited) and output directory.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--wcep-articles', required=True)
    parser.add_argument('--cc-articles', required=True)
    parser.add_argument('--max-cluster-size', type=int, default=-1)
    parser.add_argument('--o', required=True)
    main(parser.parse_args())
| 5,119 | 32.907285 | 89 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_reproduction/utils.py | import json
def read_lines(path):
    """Yield the lines of a text file, stripped of surrounding whitespace."""
    with open(path) as handle:
        for raw in handle:
            yield raw.strip()
def read_jsonl(path):
    """Yield one decoded JSON object per line of the file at `path`."""
    with open(path) as handle:
        for raw in handle:
            yield json.loads(raw)
def write_jsonl(items, path, mode='a'):
    """Write `items` to `path` as JSON lines.

    Args:
        items: iterable of JSON-serializable objects.
        path: output file path.
        mode: 'w' to truncate first, 'a' to append (default).
    """
    assert mode in ['w', 'a']
    lines = [json.dumps(x) for x in items]
    with open(path, mode) as f:
        # BUG FIX: the original unconditionally wrote '\n'.join([]) + '\n',
        # leaving a stray blank line in the file when `items` was empty.
        if lines:
            f.write('\n'.join(lines) + '\n')
| 421 | 18.181818 | 42 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_reproduction/extract_wcep_articles.py | import argparse
import multiprocessing
import time
import pathlib
import random
import newspaper
import json
import numpy as np
import utils
def extract_article(todo_article):
    """Download and parse one archived article with newspaper.

    Args:
        todo_article: dict with at least 'archive_url', 'url', 'collection'.

    Returns:
        dict with state 'successful' (full article fields) or 'failed'
        (archive_url plus the error message only). Never raises.
    """
    url = todo_article['archive_url']
    extracted = newspaper.Article(url)
    try:
        extracted.download()
        extracted.parse()
        # BUG FIX: the original bound this value to a local named `time`,
        # shadowing the `time` module imported at the top of the file.
        if extracted.publish_date is None:
            publish_time = None
        else:
            publish_time = extracted.publish_date.isoformat()
        article = {
            'time': publish_time,
            'title': extracted.title,
            'text': extracted.text,
            'url': todo_article['url'],
            'archive_url': url,
            'collection': todo_article['collection'],
            'state': 'successful',
            'error': None,
        }
    except Exception as e:
        print(e)
        article = {
            'archive_url': url,
            'state': 'failed',
            'error': str(e),
        }
    return article
def batches(iterable, n=1):
    """Yield consecutive slices of `iterable` of length n (last may be shorter)."""
    size = len(iterable)
    for start in range(0, size, n):
        yield iterable[start:min(start + n, size)]
def read_input(path):
    """Flatten the clusters file into a single list of WCEP articles.

    Each article is tagged with its cluster's 'collection' so the split
    (train/val/test) survives the flattening.
    """
    articles = []
    with open(path) as handle:
        for raw in handle:
            cluster = json.loads(raw)
            collection = cluster['collection']
            for article in cluster['wcep_articles']:
                article['collection'] = collection
                articles.append(article)
    return articles
def main(args):
    """Scrape all WCEP articles in parallel batches, resuming from a previous
    run unless --override is set; results are appended to args.o as JSONL.
    """
    outpath = pathlib.Path(args.o)
    done_urls = set()
    failed_articles = []
    n_done = 0
    n_success = 0
    if args.override and outpath.exists():
        outpath.unlink()
    elif outpath.exists():
        # resume: re-read previous output to know what is already done/failed
        with open(outpath) as f:
            for line in f:
                a = json.loads(line)
                url = a['archive_url']
                if a['state'] == 'successful':
                    n_success += 1
                else:
                    failed_articles.append(a)
                n_done += 1
                done_urls.add(url)
    todo_articles = read_input(args.i)
    n_total = len(todo_articles)
    todo_articles = [a for a in todo_articles if a['archive_url']
                     not in done_urls]
    print('failed articles from last run:', len(failed_articles))
    print('articles todo:', len(todo_articles))
    if args.repeat_failed:
        # retry previously failed downloads first
        todo_articles = failed_articles + todo_articles
    if args.shuffle:
        random.shuffle(todo_articles)
    durations = []
    t1 = time.time()
    for todo_batch in batches(todo_articles, args.batchsize):
        # a fresh worker pool per batch; extract_article never raises,
        # so every input produces an output record
        pool = multiprocessing.Pool(processes=args.jobs)
        output = pool.map(extract_article, todo_batch)
        pool.close()
        articles = []
        for a in output:
            if a['state'] == 'successful':
                n_success += 1
                articles.append(a)
            done_urls.add(a['archive_url'])
            n_done += 1
        if articles:
            # only successful articles are persisted
            utils.write_jsonl(articles, outpath, mode='a')
        t2 = time.time()
        elapsed = t2 - t1
        durations.append(elapsed)
        t1 = t2
        print(f'{n_done}/{n_total} done, {n_success}/{n_done} successful')
        print('Average per-batch time (seconds):')
        print('last batch:', elapsed)
        print('last 10:', np.mean(durations[-10:]))
        print('overall:', np.mean(durations))
        print()
# CLI entry point: input clusters file, output JSONL, batch/parallelism knobs,
# and flags to restart from scratch, shuffle work, or retry failed downloads.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    parser.add_argument('--o', required=True)
    parser.add_argument('--batchsize', type=int, default=20)
    parser.add_argument('--jobs', type=int, default=2)
    parser.add_argument('--override', action='store_true')
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--repeat-failed', action='store_true')
    main(parser.parse_args())
| 3,803 | 25.601399 | 74 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_generation/step5_combine_dataset.py | import argparse
from general import utils
def load_urls(path):
    """Read '<url> <archive_url>' pairs and return both lookup directions.

    Lines that do not contain exactly two whitespace-separated fields are
    skipped.

    Returns:
        (url_to_arc, arc_to_url) dicts.
    """
    url_to_arc, arc_to_url = {}, {}
    with open(path) as handle:
        for raw in handle:
            fields = raw.split()
            if len(fields) != 2:
                continue
            url, arc_url = fields
            url_to_arc[url] = arc_url
            arc_to_url[arc_url] = url
    return url_to_arc, arc_to_url
def main(args):
    """Join scraped articles back onto events via the URL <-> archive-URL map
    and keep only events that matched at least one article.
    """
    articles = list(utils.read_jsonl(args.articles))
    events = list(utils.read_jsonl(args.events))
    url_to_arc, arc_to_url = load_urls(args.urls)
    # index articles by their ORIGINAL url; each article keeps both urls
    url_to_article = {}
    for article in articles:
        archive_url = article['url']
        if archive_url in arc_to_url:
            original_url = arc_to_url[archive_url]
            url_to_article[original_url] = article
            article['archive_url'] = archive_url
            article['url'] = original_url
    kept_events = []
    for event in events:
        matched = [url_to_article[url]
                   for url in event['references'] if url in url_to_article]
        event['articles'] = matched
        if len(matched) > 0:
            kept_events.append(event)
    print('original events:', len(events))
    print('new events:', len(kept_events))
    utils.write_jsonl(kept_events, args.o)
# CLI entry point: scraped articles, events file, url-mapping file, output.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--articles', required=True)
    parser.add_argument('--events', required=True)
    parser.add_argument('--urls', required=True)
    parser.add_argument('--o', required=True)
    main(parser.parse_args())
| 1,505 | 25.421053 | 65 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_generation/step2_process_wcep_html.py | import argparse
import datetime
import calendar
import pathlib
import collections
import arrow
import json
import uuid
from bs4 import BeautifulSoup
def make_month_to_int():
    """Map English month names ('January'..'December') to their numbers 1..12.

    calendar.month_name[0] is the empty string, hence the `number > 0` filter.
    """
    return {name: number
            for number, name in enumerate(calendar.month_name)
            if number > 0}
EVENTS = []  # all Event objects extracted so far, in parse order
TOPIC_TO_SUB = collections.defaultdict(set)  # story id -> ids of its sub-stories
TOPIC_TO_SUPER = collections.defaultdict(set)  # story id -> ids of its parent stories
EVENT_ID_COUNTER = 0  # monotonically increasing id assigned to each Event
MONTH_TO_INT = make_month_to_int()  # 'January' -> 1, ..., 'December' -> 12
class Event:
    """One bullet-point news event extracted from a WCEP month page."""
    # (removed: commented-out debug prints left in the original __init__)

    def __init__(self, text, id, date, category=None, stories=None,
                 wiki_links=None, references=None):
        """
        Args:
            text: the event's summary text.
            id: integer id, unique within one extraction run.
            date: the day the event is listed under.
            category: section heading the event appeared under, if any.
            stories: wiki ids of ongoing stories the event belongs to.
            wiki_links: wiki hrefs mentioned in the event text.
            references: external source URLs cited for the event.
        """
        self.text = text
        self.id = id
        self.date = date
        self.category = category
        self.stories = stories if stories else []
        self.wiki_links = wiki_links if wiki_links else []
        self.references = references if references else []

    def to_json_dict(self):
        """Return a JSON-serializable dict view; `date` is stringified."""
        return {
            'text': self.text,
            'id': self.id,
            'date': str(self.date),
            'category': self.category,
            'stories': self.stories,
            'wiki_links': self.wiki_links,
            'references': self.references,
        }
def url_to_time(url, month_to_num):
    """Parse a month-page URL ending in '<Month>_<Year>' into a datetime
    pointing at the first day of that month.
    """
    month_name, year_str = url.split('/')[-1].split('_')
    return datetime.datetime(year=int(year_str),
                             month=month_to_num[month_name],
                             day=1)
def extract_date(date_div):
    """Return the datetime.date of one day container on a WCEP month page.

    The 'summary' span holds the day's date with a parenthesized part;
    the text between the first '(' and ')' is parsed with arrow
    (presumably an ISO-style date -- TODO confirm against live pages).
    """
    date = date_div.find('span', class_='summary')
    date = date.text.split('(')[1].split(')')[0]
    date = arrow.get(date)
    # normalize to a plain datetime.date
    date = datetime.date(date.year, date.month, date.day)
    return date
def wiki_link_to_id(s):
    """Return the wiki page id: the part of `s` after the first '/wiki/'.

    Raises IndexError when `s` contains no '/wiki/' -- callers rely on this
    to detect non-wiki links.
    """
    parts = s.split('/wiki/')
    return parts[1]
def recursively_extract_bullets(e,
                                date,
                                category,
                                prev_stories,
                                is_root=False):
    """Walk a nested <ul>/<li> bullet tree from a WCEP day and record events.

    Intermediate <li> nodes name ongoing stories; leaf <li> nodes are event
    summaries. Appends Event objects to the module-global EVENTS, updates
    TOPIC_TO_SUB/TOPIC_TO_SUPER, and increments EVENT_ID_COUNTER.

    Args:
        e: a BeautifulSoup element (the root <ul> when is_root=True,
           otherwise an <li>).
        date: the day these bullets belong to.
        category: the current section heading, or None.
        prev_stories: wiki ids of story ancestors accumulated on the way down.
        is_root: True only for the top-level <ul> of a day.
    """
    global EVENT_ID_COUNTER
    if is_root:
        lis = e.find_all('li', recursive=False)
        result = [recursively_extract_bullets(li, date, category, [])
                  for li in lis]
        return result
    else:
        ul = e.find('ul')
        if ul:
            # intermediate "node", e.g. a story an event is assigned to
            links = e.find_all('a', recursive=False)
            new_stories = []
            for link in links:
                try:
                    new_stories.append(wiki_link_to_id(link.get('href')))
                except:
                    # wiki_link_to_id raises when the href has no '/wiki/'
                    print("not a wiki link:", link)
            lis = ul.find_all('li', recursive=False)
            # record the story hierarchy (ancestors x newly seen stories)
            for prev_story in prev_stories:
                for new_story in new_stories:
                    TOPIC_TO_SUB[prev_story].add(new_story)
                    TOPIC_TO_SUPER[new_story].add(prev_story)
            stories = prev_stories + new_stories
            for li in lis:
                recursively_extract_bullets(li, date, category, stories)
        else:
            # reached the "leaf", i.e. event summary
            text = e.text
            wiki_links = []
            references = []
            for link in e.find_all('a'):
                url = link.get('href')
                # rel="nofollow" marks external source links on these pages
                if link.get('rel') == ['nofollow']:
                    references.append(url)
                elif url.startswith('/wiki'):
                    wiki_links.append(url)
            event = Event(text=text, id=EVENT_ID_COUNTER, date=date,
                          category=category, stories=prev_stories,
                          wiki_links=wiki_links, references=references)
            EVENTS.append(event)
            EVENT_ID_COUNTER += 1
def process_month_page_2004_to_2017(html):
    """Parse one 2004-2017 style WCEP month page and record its events.

    In this layout each day is a <table class="vevent">; category headings
    are <dl> elements and event bullets are <ul> lists inside the
    description cell.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for day in soup.find_all('table', class_='vevent'):
        day_date = extract_date(day)
        category = None
        description = day.find('td', class_='description')
        for child in description.children:
            if child.name == 'dl':
                category = child.text
            elif child.name == 'ul':
                recursively_extract_bullets(child, day_date, category, [],
                                            is_root=True)
def process_month_page_from_2018(html):
    """Parse one 2018+ style WCEP month page and record its events.

    In this layout each day is a <div class="vevent">; category headings are
    <div role="heading"> elements and event bullets are <ul> lists inside the
    description div.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for day in soup.find_all('div', class_='vevent'):
        day_date = extract_date(day)
        category = None
        description = day.find('div', class_='description')
        for child in description.children:
            if child.name == 'div' and child.get('role') == 'heading':
                category = child.text
            elif child.name == 'ul':
                recursively_extract_bullets(child, day_date, category, [],
                                            is_root=True)
def file_to_date(path):
    """Turn a stored page filename '<Month>_<Year>.html' into the date of the
    first day of that month (used as a sort key for the input files).
    """
    stem = str(path.name).split('.')[0]
    month_name, year_str = stem.split('_')
    return datetime.date(int(year_str), MONTH_TO_INT[month_name], 1)
def main(args):
    """Parse every stored month page (in chronological order) and write all
    extracted events to args.o as JSON lines sorted by date.
    """
    in_dir = pathlib.Path(args.i)
    for fpath in sorted(in_dir.iterdir(), key=file_to_date):
        fname = fpath.name
        with open(fpath) as f:
            html = f.read()
        # the page layout changed in 2018, so dispatch on the year
        year = int(fname.split('.')[0].split('_')[1])
        if 2004 <= year < 2018:
            print(fname)
            process_month_page_2004_to_2017(html)
        elif 2018 <= year :
            print(fname)
            process_month_page_from_2018(html)
    # the parsers append to the module-global EVENTS list
    EVENTS.sort(key=lambda x: x.date)
    with open(args.o, 'w') as f:
        for e in EVENTS:
            e_json = json.dumps(e.to_json_dict())
            f.write(e_json + '\n')
# CLI entry point: directory of stored month pages in, events JSONL out.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', type=str, help='input directory', required=True)
    parser.add_argument('--o', type=str, help='output file', required=True)
    main(parser.parse_args())
wcep-mds-dataset | wcep-mds-dataset-master/dataset_generation/step4_scrape_sources.py | import argparse
import multiprocessing
import json
import os
import time
import pathlib
import random
import newspaper
import json
import numpy as np
def scrape_article(url):
    """Download and parse one article URL with newspaper.

    Returns:
        (url, article_dict, error) where article_dict has state 'successful'
        or 'failed' and error is the caught exception (or None). Never raises.
    """
    a = newspaper.Article(url)
    error = None
    try:
        a.download()
        a.parse()
        # BUG FIX: the original bound this value to a local named `time`,
        # shadowing the `time` module imported at the top of the file.
        if a.publish_date is None:
            publish_time = None
        else:
            publish_time = a.publish_date.isoformat()
        article = {
            'time': publish_time,
            'title': a.title,
            'text': a.text,
            'url': url,
            'state': 'successful',
            'error': None,
        }
    except Exception as e:
        print(e)
        article = {
            'url': url,
            'state': 'failed',
            'error': str(e),
        }
        error = e
    return url, article, error
def write_articles(articles, path):
    """Append `articles` to `path`, one JSON object per line."""
    serialized = [json.dumps(article) for article in articles]
    with open(path, 'a') as handle:
        handle.write('\n'.join(serialized) + '\n')
def batches(iterable, n=1):
    """Yield consecutive slices of `iterable` of size n (last may be shorter)."""
    total = len(iterable)
    for start in range(0, total, n):
        yield iterable[start:min(start + n, total)]
def load_urls(path):
    """Return the archive URLs from a '<url> <archive_url>' file.

    Entries whose snapshot failed are recorded as the literal string 'None'
    and are skipped.
    """
    archive_urls = []
    with open(path) as handle:
        for raw in handle:
            _, archive_url = raw.split()
            if archive_url != 'None':
                archive_urls.append(archive_url)
    return archive_urls
def main(args):
    """Scrape all archived source URLs in parallel batches, resuming from a
    previous run unless --override is set; results are appended to args.o.
    """
    outpath = pathlib.Path(args.o)
    done_urls = set()
    failed_urls = []
    n_success = 0
    if args.override and outpath.exists():
        outpath.unlink()
    elif outpath.exists():
        # resume: re-read previous output to know what is already done/failed
        with open(outpath) as f:
            for line in f:
                a = json.loads(line)
                url = a['url']
                if a['state'] == 'successful':
                    n_success += 1
                else:
                    failed_urls.append(url)
                done_urls.add(url)
    urls = load_urls(args.i)
    if args.repeat_failed:
        # retry previously failed URLs first
        todo_urls = failed_urls + [url for url in urls if url not in done_urls]
    else:
        todo_urls = [url for url in urls if url not in done_urls]
    if args.shuffle:
        random.shuffle(todo_urls)
    n_done = len(done_urls)
    n_total = len(urls)
    durations = []
    t1 = time.time()
    for url_batch in batches(todo_urls, args.batchsize):
        # fresh worker pool per batch; scrape_article never raises
        pool = multiprocessing.Pool(processes=args.jobs)
        output = pool.map(scrape_article, url_batch)
        pool.close()
        articles = []
        for url, a, error in output:
            if a['state'] == 'successful':
                n_success += 1
                articles.append(a)
            n_done += 1
            done_urls.add(url)
        if articles:
            # only successful articles are persisted
            write_articles(articles, outpath)
        t2 = time.time()
        elapsed = t2 - t1
        durations.append(elapsed)
        t1 = t2
        print(f'{n_done}/{n_total} done')
        print(f'total: {n_total}, done: {n_done}, successful: {n_success}')
        print('TIME (seconds):')
        print('last batch:', elapsed)
        print('last 5:', np.mean(durations[-10:]))
        print('overall 5:', np.mean(durations))
        print()
# CLI entry point: url-mapping file in, scraped articles JSONL out, plus
# batch/parallelism knobs and restart/shuffle/retry flags.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    parser.add_argument('--o', required=True)
    parser.add_argument('--batchsize', type=int, default=20)
    parser.add_argument('--jobs', type=int, default=2)
    parser.add_argument('--override', action='store_true')
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--repeat-failed', action='store_true')
    main(parser.parse_args())
wcep-mds-dataset | wcep-mds-dataset-master/dataset_generation/step1_store_wcep_html.py | import requests
import argparse
import pathlib
from bs4 import BeautifulSoup
ROOT_URL = 'https://en.wikipedia.org/wiki/Portal:Current_events'
def extract_month_urls():
    """Collect the absolute URLs of all monthly WCEP archive pages from the
    navigation box of the Current Events portal."""
    html = requests.get(ROOT_URL).text
    soup = BeautifulSoup(html, 'html.parser')
    e = soup.find('div', class_='NavContent hlist')
    urls = [x['href'] for x in e.find_all('a')]
    # month links have the shape '/wiki/Portal:Current_events/<Month>_<Year>'
    # (exactly three '/'), which filters out the other navigation links
    urls = [url for url in urls if url.count('/') == 3]
    urls = ['https://en.wikipedia.org' + url for url in urls]
    return urls
def main(args):
    """Download every WCEP month page and store its raw HTML under args.o,
    named '<Month>_<Year>.html' after the page URL's last path segment."""
    out_dir = pathlib.Path(args.o)
    if not out_dir.exists():
        out_dir.mkdir()
    month_urls = extract_month_urls()
    print(f'Storing {len(month_urls)} WCEP month pages:')
    for url in month_urls:
        print(url)
        html = requests.get(url).text
        target = out_dir / (url.split('/')[-1] + '.html')
        with open(target, 'w') as handle:
            handle.write(html)
# CLI entry point: output directory for the downloaded month pages.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--o', type=str, help='output directory', required=True)
    main(parser.parse_args())
| 1,096 | 26.425 | 80 | py |
wcep-mds-dataset | wcep-mds-dataset-master/dataset_generation/step3_snapshot_source_urls.py | import argparse
import savepagenow
import json
import os
import random
import time
from requests.exceptions import ConnectionError
def read_jsonl(path):
    """Yield one decoded JSON object per line of the file at `path`."""
    with open(path) as handle:
        for raw in handle:
            yield json.loads(raw)
def write_jsonl(items, path, batch_size=100, override=True):
    """Write `items` to `path` as JSON lines, flushing in batches.

    Args:
        items: iterable of JSON-serializable objects.
        path: output file path.
        batch_size: number of items buffered between writes.
        override: when True, truncate the file before writing.
    """
    if override:
        with open(path, 'w'):
            pass
    pending = []
    for count, item in enumerate(items):
        if count > 0 and count % batch_size == 0:
            with open(path, 'a') as handle:
                handle.write('\n'.join(pending) + '\n')
            pending = []
        pending.append(json.dumps(item))
    if pending:
        with open(path, 'a') as handle:
            handle.write('\n'.join(pending) + '\n')
def main(args):
    """Snapshot every event reference URL on the Wayback Machine and record
    '<url> <archive_url>' lines in args.o.

    Resumes from an existing output file (unless --override), retries on
    rate-limiting (ConnectionError), and appends results in batches of
    args.batchsize.
    """
    n_done = 0
    n_captured = 0
    n_success = 0
    done_url_set = set()
    if not args.override and os.path.exists(args.o):
        # resume: previously processed URLs are skipped
        with open(args.o) as f:
            for line in f:
                url, archive_url = line.split()
                n_done += 1
                if archive_url != 'None':
                    n_success += 1
                done_url_set.add(url)
    events = read_jsonl(args.i)
    urls = [url for e in events for url in e['references']
            if url not in done_url_set]
    if args.shuffle:
        random.shuffle(urls)
    n_total = len(urls) + len(done_url_set)
    batch = []
    for url in urls:
        repeat = True
        archive_url, captured = None, None
        while repeat:
            try:
                archive_url, captured = savepagenow.capture_or_cache(url)
                repeat = False
                if captured:
                    n_captured += 1
                n_success += 1
            except Exception as e:
                if isinstance(e, ConnectionError):
                    # rate-limited by the archive: back off and retry
                    print('Too many requests, waiting a bit...')
                    repeat = True
                else:
                    repeat = False
            if repeat:
                time.sleep(60)
            else:
                time.sleep(1)
        if archive_url is not None:
            batch.append((url, archive_url, captured))
        n_done += 1
        print(f'total: {n_total}, done: {n_done}, '
              f'successful: {n_success}, captured: {n_captured}\n')
        # BUG FIX: the original flushed when `len(batch) < args.batchsize`,
        # which wrote on every iteration (no batching) and, for batchsize <= 1,
        # NEVER flushed, silently losing all results.
        if len(batch) >= args.batchsize:
            lines = [f'{u} {a}' for (u, a, _) in batch]
            with open(args.o, 'a') as f:
                f.write('\n'.join(lines) + '\n')
            batch = []
    # flush the remainder so the last partial batch is not lost
    if batch:
        lines = [f'{u} {a}' for (u, a, _) in batch]
        with open(args.o, 'a') as f:
            f.write('\n'.join(lines) + '\n')
# CLI entry point: events file in, url-mapping file out, plus batching and
# restart/shuffle flags.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    parser.add_argument('--o', required=True)
    parser.add_argument('--batchsize', type=int, default=20)
    parser.add_argument('--override', action='store_true')
    parser.add_argument('--shuffle', action='store_true')
    main(parser.parse_args())
| 2,971 | 27.576923 | 79 | py |
SARS-CoV-2_origins | SARS-CoV-2_origins-master/scripts/python/ACE2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 12:49:25 2020
@author: Erwan Sallard erwan.sallard@ens.psl.eu
"""
'''goal: this program compares the ACE2 proteins of various organisms with a
reference ACE2 (one of the sequences in the alignment) and identify their level
of similarity on the residues important for SARS-CoV-2 spike binding
(identified in Yan et al 2020 science). The program takes as an input a
multifasta file describing the alignment between the ACE2 proteins studied and
the name of the reference sequence (for example 'HUMAN_Homo_sapiens'), and the
name of the output file. The output is a .csv file'''
import sys
# CLI arguments: alignment multifasta, name of the reference sequence,
# output .csv path
alignment_file = sys.argv[1]
name_reference = sys.argv[2]
output_name=sys.argv[3]
importants=[24,30,34,41,42,82,353,357]
# the list of the most important residues for SARS-CoV-2 spike binding in human
# ACE2 protein
n_importants=len(importants)
for k in range(n_importants):
    importants[k]-=1
# python counts from 0 while residues are indexed from 1, so the list must be
# reindexed
alignment=open(alignment_file,'r')
# the multifasta file of ACE2 proteins multiple alignment is open
lines=alignment.readlines()
alignment.close()
sequences=dict()
# sequences will be a dictionary with the format 'sequence_name':sequence
# for each ACE2 sequence in the alignment (including the '-' signs)
for line in lines:
    if line[0]=='>' or line[0]=='\ufeff':
        # if the line corresponds to a new sequence name
        # NOTE(review): assumes headers look like '>XX_Genus_species_...'
        # with at least three '_'-separated parts -- TODO confirm input format
        mots=line.split('_')
        nom=mots[1]+'_'+mots[2]+'_'+mots[3][:-1]
        sequences[nom]=''
    else:
        # if the line corresponds to the sequence itself
        sequences[nom]=sequences[nom]+line[:-1]
speciesnames=list(sequences.keys())
import pandas as pd
species=pd.DataFrame()
# species will be a dataframe indicating, for each species, the number of
# differences with reference ACE2 in important residues
species['number of important residues']=[n_importants]
reference=sequences[name_reference]
# Because of '-' signs in the alignment, the position of important residues
# does not correspond to their index in the string. They must be reindexed.
counter=0
for k in reference:
    if k=='-':
        # all residues after this sign must be reindexed
        for j in range(n_importants):
            if importants[j]>=counter:
                importants[j]+=1
    else:
        counter+=1
# The comparison proper with reference sequence:
for name in speciesnames:
    sequence=sequences[name]
    differences=0
    for k in importants:
        if sequence[k]!=reference[k]:
            differences+=1
    species[name]=[differences]
# one row per species, single count column
species=species.transpose()
species.columns=['number of differences with human on the important residues']
species.to_csv(output_name)
| 2,773 | 33.246914 | 79 | py |
SARS-CoV-2_origins | SARS-CoV-2_origins-master/scripts/python/detection_insertion.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 14:41:12 2020
@author: erwan
"""
import sys
# CLI arguments: .clw alignment file, reference sequence name, output .csv path
alignment=sys.argv[1]
reference=sys.argv[2]
filename=sys.argv[3]
'''identifies the insertions in the sequence "reference" out of a multiple
alignment file in .clw format. Is considered an insertion every position
which is not a "-" in the reference sequence and where at least one other
sequence has a "-".
This function creates a .csv file in the path "filename", indicating the
positions of insertions in the reference protein (in first column) and, for
each position, the sequences having a '-' (in the other columns).
example: if "reference"= MART-PYLK and the 2 other sequences of the
alignment are seq1= MA-TVPYLK and seq2= MART-P-LK, the returned dictionary
is {3:['seq1'];6:['seq2']}'''
clwfile=open(alignment,'r')
lines=clwfile.readlines()
clwfile.close()
sequences=dict()
# sequences is a dictionary which will contain the different sequences
# separately
lines.remove(lines[0])
# we remove the first line, which is the title of the alignment
names=[] # the list of sequence names
for line in lines:
    if line[0] not in ['\n', ' ']:
        # meaning if the line is a sequence and not a spacer
        # clustal rows look like '<name>   <chunk>'; sequences span
        # several rows, so chunks are concatenated per name
        data=line.split(' ')
        name=data[0]
        sequence=data[len(data)-1][:-1]
        # sequence is the sequence of the line without the final \n
        if name in names:
            sequences[name]=sequences[name]+sequence
        else:
            names.append(name)
            sequences[name]=sequence
reference_sequence=sequences[reference]
names.remove(reference)
# names is the list of sequence names except the reference sequence
position=0 # a counter of position in the reference protein
f=open(filename,'w') # the output file
for k in range(len(reference_sequence)):
    # only alignment columns where the reference has a residue can be
    # insertions in the reference
    if reference_sequence[k]!='-':
        position+=1
        is_insertion=False
        sequences_without_insertion=[]
        # if the position studied is an insertion, this list will contain
        # the names of the sequences without the insertion
        for name in names:
            if sequences[name][k]=='-':
                is_insertion=True
                sequences_without_insertion.append(name)
        if is_insertion:
            # we write a line in the .csv file, with the insertion position
            # and the names of sequences without this insertion
            line=str(position)
            for name in sequences_without_insertion:
                line+=','+name
            line+='\n'
            f.writelines(line)
f.close()
| 2,581 | 32.102564 | 75 | py |
SARS-CoV-2_origins | SARS-CoV-2_origins-master/scripts/python/mutation_analyser.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 21:38:57 2020
@author: erwan
"""
import sys
import matplotlib.pyplot as plt
from Bio import pairwise2
from Bio.Seq import Seq
from Bio.SubsMat import MatrixInfo as matlist
''' This program compares two nucleotide sequences, identifies indels,
synonymous and non synonymous mutations and draws a plot of cumulated counts
of each mutation type as a function of the position in the corresponding
proteins. The program takes as input the names of the two sequences, which will
be retrieved in the data files; and the name to be given to the output
figure.'''
# CLI arguments: the two sequence names to compare and the output figure path
name1=sys.argv[1]
name2=sys.argv[2]
output_name=sys.argv[3] # the path at which the output figure will be saved
### retrieving the sequences:
# the fasta is assumed to alternate header line / single sequence line;
# each requested name is matched as a substring of a header -- the NEXT
# line is then taken as its sequence
nucleotides=open('data/S-gene/S-gene_all.fasta','r')
lines=nucleotides.readlines()
nucleotides.close()
nt_sequence1,nt_sequence2='',''
for k in range(len(lines)-1):
    if name1 in lines[k]:
        nt_sequence1=lines[k+1][:-1] # we take the sequence without the final \n
    if name2 in lines[k]:
        nt_sequence2=lines[k+1][:-1]
if nt_sequence1=='':
    print('gene1 not found')
if nt_sequence2=='':
    print('gene2 not found')
### translating the sequences into proteins
# Standard genetic code: lowercase DNA codon -> one-letter amino acid,
# with '*' marking the three stop codons.
genetic_code=dict({'ttt':'F','ttc':'F','ttg':'L','tta':'L','ctt':'L','ctc':'L',
                   'ctg':'L','cta':'L','att':'I','atc':'I','ata':'I','atg':'M',
                   'gtt':'V','gtc':'V','gta':'V','gtg':'V','tct':'S','tcc':'S',
                   'tca':'S','tcg':'S','agt':'S','agc':'S','cct':'P','ccc':'P',
                   'cca':'P','ccg':'P','act':'T','acc':'T','aca':'T','acg':'T',
                   'gct':'A','gcc':'A','gca':'A','gcg':'A','tat':'Y','tac':'Y',
                   'taa':'*','tag':'*','tga':'*','cat':'H','cac':'H','caa':'Q',
                   'cag':'Q','aat':'N','aac':'N','aaa':'K','aag':'K','gat':'D',
                   'gac':'D','gaa':'E','gag':'E','tgt':'C','tgc':'C','tgg':'W',
                   'cgt':'R','cgc':'R','cga':'R','cgg':'R','aga':'R','agg':'R',
                   'ggt':'G','ggc':'G','gga':'G','ggg':'G'})
def translate(seq):
    """Translate a DNA coding sequence into a one-letter protein string.

    The input is lowercased first; a trailing stop codon ('*') is stripped
    from the result.

    Raises:
        ValueError: if len(seq) is not a multiple of 3.
    """
    seq = seq.lower()
    if len(seq) % 3 != 0:
        # BUG FIX: the original *returned* this message as if it were a
        # protein sequence, so the error could silently flow downstream.
        raise ValueError('the sequence length is not a multiple of 3')
    protein = ''
    for k in range(len(seq) // 3):
        protein += genetic_code[seq[3*k:3*k+3]]
    # guard the empty-input case (the original raised IndexError on '')
    if protein and protein[-1] == '*':
        return protein[:-1]
    return protein
protein1=translate(nt_sequence1)
protein2=translate(nt_sequence2)
### aligning the protein sequences
# global alignment with BLOSUM62, gap open -10, gap extend -1;
# only the best-scoring alignment (alignments[0]) is used
reformated_protein1=Seq(protein1)
reformated_protein2=Seq(protein2)
alignments = pairwise2.align.globalds(reformated_protein1,reformated_protein2,matlist.blosum62,-10,-1)
aligned_protein1=alignments[0][0]
aligned_protein2=alignments[0][1]
### counts
number_positions=len(aligned_protein1)
positions=[k for k in range(1,number_positions+1)] # will be the x axis in the final plot
position_in_protein1,position_in_protein2=0,0
indel_list,synonymous_list,non_synonymous_list=[],[],[]
indels,synonymous,non_synonymous=0,0,0
for k in range(number_positions):
    # residues are treated one after the other
    position_in_protein1+=1 # this is the position of the considered residue
    # in protein1; '-' signs are not counted
    position_in_protein2+=1
    residue1,residue2=aligned_protein1[k],aligned_protein2[k]
    if residue1=='-':
        position_in_protein1-=1 # '-' signs are not counted
        indels+=1 # the sign '-' indicates a deletion in protein1 compared with
        # protein 2
    elif residue2=='-':
        position_in_protein2-=1
        indels+=1
    elif residue1!=residue2:
        non_synonymous+=1 # the residues are different
    else:
        # we retrieve the codons corresponding to the considered residue
        # (identical residues with different codons = synonymous mutation)
        codon1=nt_sequence1[3*position_in_protein1-3:3*position_in_protein1]
        codon2=nt_sequence2[3*position_in_protein2-3:3*position_in_protein2]
        if codon1!=codon2:
            synonymous+=1
    indel_list.append(indels)
    synonymous_list.append(synonymous)
    non_synonymous_list.append(non_synonymous)
### draws plot
# cumulative counts of each mutation type along the alignment, saved as PDF
p1=plt.plot(positions,indel_list)
p2=plt.plot(positions,synonymous_list)
p3=plt.plot(positions,non_synonymous_list)
plt.xlabel('position in the protein alignment')
plt.ylabel('cumulated mutation counts')
plt.title('comparison of '+name1+' and '+name2)
plt.legend(('indels','synonymous mutations','non synonymous mutations'))
plt.savefig(output_name,format='pdf')
plt.show()
| 4,657 | 38.142857 | 102 | py |
RG | RG-master/Image Classification/main.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import random
from resnet import *
from utils import progress_bar
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy so far (updated by test())
start_epoch = 0
# Data
print('==> Preparing data..')
# training augmentation: random crop + flip, then per-channel normalization
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# test-time: normalization only (same per-channel mean/std)
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# download=False: the CIFAR-10 data is expected to already exist in ./data
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=0)
def poly(base_lr, epoch, max_iter=100, power=0.9):
    """Polynomial learning-rate decay: base_lr * (1 - (epoch+1)/max_iter)**power."""
    fraction_remaining = 1 - float(epoch + 1) / max_iter
    return base_lr * (fraction_remaining ** power)
# Model
print('==> Building model..')
net = ResNet34()
if device == 'cuda':
    net.cuda()
    cudnn.benchmark = True  # autotune conv kernels for the fixed input size
# NOTE(review): criterion/optimizer and the .cuda() calls in train()/test()
# assume a CUDA device is available -- this script will fail on CPU-only hosts
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.5, weight_decay=0.0001)
M_loss = 0  # declared `global` in train() but never reassigned there
# Training
# Training
def train(epoch):
    """Train for one epoch with the Random Gradient scheme: each batch's loss
    is scaled by a uniform random factor in [0, 1) before backprop.
    """
    global M_loss  # NOTE(review): declared global but never updated here
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss_ = loss * random.random() # Here represents the Random Gradient
        loss_.backward()
        optimizer.step()
        # reported loss is the scaled one, not the raw cross-entropy
        train_loss += loss_.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
    """Evaluate on the test set; log and remember the best accuracy so far."""
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        # BUG FIX: the original used open(...).write(...) without closing,
        # leaking the file handle; use a context manager instead.
        with open('./random_gradient_accuracy.txt', 'a') as log:
            log.write(str(epoch) + '_' + str(acc) + ',')
        best_acc = acc
    print('best_acc:', best_acc)
def adjust_learning_rate(optimizer, epoch, net):
    """Set every param group's LR to the polynomial-decay schedule value.

    `net` is unused but kept for interface compatibility with callers.
    """
    new_lr = poly(0.1, epoch)  # This is normal way to reduce the LR, you can replace it with CLR
    print('current lr: ', new_lr)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
# Entry point: 100 epochs of train/eval with per-epoch LR decay.
if __name__ == '__main__':
    for epoch in range(0, 100):
        train(epoch)
        test(epoch)
        adjust_learning_rate(optimizer,epoch,net)
| 4,105 | 30.343511 | 109 | py |
RG | RG-master/Image Classification/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block used by ResNet-18/34."""
    # output channels = expansion * planes
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # identity shortcut, replaced by a 1x1 conv projection when the
        # spatial size or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        # conv-bn-relu, conv-bn, add shortcut, final relu
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block used by ResNet-50/101/152."""
    # output channels = expansion * planes
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        # identity shortcut, replaced by a 1x1 conv projection when the
        # spatial size or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four stages, global pool, linear head.

    NOTE(review): the final F.avg_pool2d(out, 4) assumes 32x32 inputs (the
    feature map is 4x4 after three stride-2 stages) — confirm for other sizes.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block in a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def ResNet18():
    """ResNet-18: BasicBlock, two blocks per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """ResNet-34: BasicBlock, [3, 4, 6, 3] blocks per stage."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """ResNet-50: Bottleneck, [3, 4, 6, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """ResNet-101: Bottleneck, [3, 4, 23, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """ResNet-152: Bottleneck, [3, 8, 36, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
| 3,941 | 32.982759 | 102 | py |
RG | RG-master/Image Classification/utils.py | import os
import sys
import time
# Module state for progress_bar().
term_width = 5  # terminal width budget for padding math; NOTE(review): with 5,
                # the padding/backspace loops below get negative (empty) ranges — confirm intended
TOTAL_BAR_LENGTH = 20.  # number of cells drawn inside the [...] bar
last_time = time.time()  # timestamp of the previous progress_bar() call
begin_time = last_time  # timestamp when the current bar started (reset at current == 0)
def progress_bar(current, total, msg=None):
    """Draw a one-line text progress bar on stdout.

    current: 0-based index of the step just finished; total: number of steps.
    msg: optional extra text appended after the step/total timings.
    Mutates module globals last_time/begin_time to measure per-step and
    cumulative elapsed time.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    # Split the bar into '=' (done), '>' (head) and '.' (remaining) cells.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad the line out to the terminal width (no-op if the range is negative).
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    # Carriage-return to overwrite on the next call; newline when finished.
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration (seconds, possibly fractional) as a short string.

    Shows at most the two most significant nonzero units, from days ('D')
    down to milliseconds ('ms'); a zero duration yields '0ms'.
    """
    days = int(seconds / 3600 / 24)
    seconds = seconds - days * 3600 * 24
    hours = int(seconds / 3600)
    seconds = seconds - hours * 3600
    minutes = int(seconds / 60)
    seconds = seconds - minutes * 60
    whole_seconds = int(seconds)
    millis = int((seconds - whole_seconds) * 1000)

    parts = []
    for value, unit in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                        (whole_seconds, 's'), (millis, 'ms')):
        if value > 0:
            parts.append('%d%s' % (value, unit))
        if len(parts) == 2:
            break
    return ''.join(parts) if parts else '0ms'
| 2,068 | 23.927711 | 64 | py |
RG | RG-master/pix2pix/pix2pix.py | import argparse
import os
import numpy as np
import math
import itertools
import time
import datetime
import sys
import random
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from models import *
from datasets import *
import torch.nn as nn
import torch.nn.functional as F
import torch
# ---- Command-line options -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=0, help='epoch to start training from')
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--dataset_name', type=str, default="facades", help='name of the dataset')
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--decay_epoch', type=int, default=100, help='epoch from which to start lr decay')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--img_height', type=int, default=256, help='size of image height')
parser.add_argument('--img_width', type=int, default=256, help='size of image width')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=500, help='interval between sampling of images from generators')
parser.add_argument('--checkpoint_interval', type=int, default=-1, help='interval between model checkpoints')
opt = parser.parse_args()
print(opt)
# Output folders for sampled images and checkpoints.
os.makedirs('images/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('saved_models/%s' % opt.dataset_name, exist_ok=True)
cuda = True if torch.cuda.is_available() else False
# Loss functions
# MSE-based adversarial loss plus an L1 pixel reconstruction loss.
criterion_GAN = torch.nn.MSELoss()
criterion_pixelwise = torch.nn.L1Loss()
# Loss weight of L1 pixel-wise loss between translated image and real image
lambda_pixel = 100
# Calculate output of image discriminator (PatchGAN)
# The discriminator downsamples 4 times (factor 2**4), so its output is
# one score per 16x16 input patch.
patch = (1, opt.img_height//2**4, opt.img_width//2**4)
# Initialize generator and discriminator
generator = GeneratorUNet()
discriminator = Discriminator()
if cuda:
    generator = generator.cuda()
    discriminator = discriminator.cuda()
    criterion_GAN.cuda()
    criterion_pixelwise.cuda()
if opt.epoch != 0:
    # Load pretrained models
    generator.load_state_dict(torch.load('saved_models/%s/generator_%d.pth' % (opt.dataset_name, opt.epoch)))
    discriminator.load_state_dict(torch.load('saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, opt.epoch)))
else:
    # Initialize weights
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Configure dataloaders
transforms_ = [ transforms.Resize((opt.img_height, opt.img_width), Image.BICUBIC),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_),
                        batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
val_dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_, mode='val'),
                            batch_size=10, shuffle=True, num_workers=1)
# Tensor type
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def sample_images(batches_done):
    """Save a grid of (input, generated, target) validation images.

    Relies on module-level globals: val_dataloader, generator, Tensor, opt.
    Note the A/B swap: key 'B' is the generator input, 'A' the target.
    """
    batch = next(iter(val_dataloader))
    source = Variable(batch['B'].type(Tensor))
    target = Variable(batch['A'].type(Tensor))
    generated = generator(source)
    # Stack input / fake / real along dim -2 (image height).
    grid = torch.cat((source.data, generated.data, target.data), -2)
    out_path = 'images/%s/%s.png' % (opt.dataset_name, batches_done)
    save_image(grid, out_path, nrow=5, normalize=True)
# ----------
# Training
# ----------
prev_time = time.time()
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Model inputs
        # (A/B are swapped on purpose: 'B' is the condition, 'A' the target.)
        real_A = Variable(batch['B'].type(Tensor))
        real_B = Variable(batch['A'].type(Tensor))
        # Adversarial ground truths
        # One label per PatchGAN output cell.
        valid = Variable(Tensor(np.ones((real_A.size(0), *patch))), requires_grad=False)
        fake = Variable(Tensor(np.zeros((real_A.size(0), *patch))), requires_grad=False)
        # ------------------
        #  Train Generators
        # ------------------
        optimizer_G.zero_grad()
        # GAN loss
        fake_B = generator(real_A)
        pred_fake = discriminator(fake_B, real_A)
        loss_GAN = criterion_GAN(pred_fake, valid)
        # Pixel-wise loss
        loss_pixel = criterion_pixelwise(fake_B, real_B)
        # Total loss
        loss_G = loss_GAN + lambda_pixel * loss_pixel
        # Random Gradient
        # Scales the generator loss by a uniform random factor each step
        # (this repo's "RG" experiment).
        loss_G = loss_G * random.random()
        loss_G.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Real loss
        pred_real = discriminator(real_B, real_A)
        loss_real = criterion_GAN(pred_real, valid)
        # Fake loss
        # detach() so D's update does not backprop into the generator.
        pred_fake = discriminator(fake_B.detach(), real_A)
        loss_fake = criterion_GAN(pred_fake, fake)
        # Total loss
        loss_D = 0.5 * (loss_real + loss_fake)
        loss_D.backward()
        optimizer_D.step()
        # --------------
        #  Log Progress
        # --------------
        # Determine approximate time left
        batches_done = epoch * len(dataloader) + i
        batches_left = opt.n_epochs * len(dataloader) - batches_done
        time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
        prev_time = time.time()
        # Print log
        sys.stdout.write("\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, pixel: %f, adv: %f] ETA: %s" %
                         (epoch, opt.n_epochs,
                          i, len(dataloader),
                          loss_D.item(), loss_G.item(),
                          loss_pixel.item(), loss_GAN.item(),
                          time_left))
        # If at sample interval save image
        if batches_done % opt.sample_interval == 0:
            sample_images(batches_done)
    if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
        # Save model checkpoints
        torch.save(generator.state_dict(), 'saved_models/%s/generator_%d.pth' % (opt.dataset_name, epoch))
        torch.save(discriminator.state_dict(), 'saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, epoch))
| 7,224 | 36.827225 | 123 | py |
RG | RG-master/pix2pix/datasets.py | import glob
import random
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class ImageDataset(Dataset):
    """Paired-image dataset: each file stores the A and B halves side by side.

    In 'train' mode the 'test' split is appended as extra data (matching the
    original behaviour). With probability 0.5 both halves are horizontally
    flipped together as augmentation.
    """

    def __init__(self, root, transforms_=None, mode='train'):
        self.transform = transforms.Compose(transforms_)
        self.files = sorted(glob.glob(os.path.join(root, mode) + '/*.*'))
        if mode == 'train':
            extra = sorted(glob.glob(os.path.join(root, 'test') + '/*.*'))
            self.files.extend(extra)

    def __getitem__(self, index):
        path = self.files[index % len(self.files)]
        img = Image.open(path)
        w, h = img.size
        # Left half is A, right half is B.
        img_A = img.crop((0, 0, w/2, h))
        img_B = img.crop((w/2, 0, w, h))
        if np.random.random() < 0.5:
            # Jointly mirror both halves (reverse the width axis).
            img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')
            img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')
        return {'A': self.transform(img_A), 'B': self.transform(img_B)}

    def __len__(self):
        return len(self.files)
| 1,056 | 28.361111 | 85 | py |
RG | RG-master/pix2pix/models.py | import torch.nn as nn
import torch.nn.functional as F
import torch
def weights_init_normal(m):
    """Init hook for Module.apply(): conv weights ~ N(0, 0.02);
    BatchNorm2d weights ~ N(1, 0.02) with zero bias."""
    name = m.__class__.__name__
    if 'Conv' in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
##############################
# U-NET
##############################
class UNetDown(nn.Module):
    """Encoder step: stride-2 4x4 conv, optional InstanceNorm, LeakyReLU,
    optional dropout. Halves the spatial resolution."""

    def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
        super(UNetDown, self).__init__()
        steps = [nn.Conv2d(in_size, out_size, 4, 2, 1, bias=False)]
        if normalize:
            steps.append(nn.InstanceNorm2d(out_size))
        steps.append(nn.LeakyReLU(0.2))
        if dropout:
            steps.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*steps)

    def forward(self, x):
        return self.model(x)
class UNetUp(nn.Module):
    """Decoder step: stride-2 transposed conv + InstanceNorm + ReLU, then
    channel-concatenation with the matching encoder (skip) feature map."""

    def __init__(self, in_size, out_size, dropout=0.0):
        super(UNetUp, self).__init__()
        steps = [
            nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False),
            nn.InstanceNorm2d(out_size),
            nn.ReLU(inplace=True),
        ]
        if dropout:
            steps.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*steps)

    def forward(self, x, skip_input):
        upsampled = self.model(x)
        return torch.cat((upsampled, skip_input), 1)
class GeneratorUNet(nn.Module):
    """8-level U-Net generator (pix2pix). With 256x256 inputs the bottleneck
    reaches 1x1; the decoder mirrors the encoder with skip connections."""

    def __init__(self, in_channels=3, out_channels=3):
        super(GeneratorUNet, self).__init__()
        # Encoder: halves resolution at every step, channels 64 -> 512.
        self.down1 = UNetDown(in_channels, 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512, dropout=0.5)
        self.down5 = UNetDown(512, 512, dropout=0.5)
        self.down6 = UNetDown(512, 512, dropout=0.5)
        self.down7 = UNetDown(512, 512, dropout=0.5)
        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)
        # Decoder: input channels double because of skip concatenation.
        self.up1 = UNetUp(512, 512, dropout=0.5)
        self.up2 = UNetUp(1024, 512, dropout=0.5)
        self.up3 = UNetUp(1024, 512, dropout=0.5)
        self.up4 = UNetUp(1024, 512, dropout=0.5)
        self.up5 = UNetUp(1024, 256)
        self.up6 = UNetUp(512, 128)
        self.up7 = UNetUp(256, 64)
        self.final = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(128, out_channels, 4, padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        # Encode, keeping every scale for the skip connections.
        feats = [x]
        for down in (self.down1, self.down2, self.down3, self.down4,
                     self.down5, self.down6, self.down7, self.down8):
            feats.append(down(feats[-1]))
        # Decode, concatenating the mirrored encoder output at each step.
        out = feats[8]
        ups = (self.up1, self.up2, self.up3, self.up4,
               self.up5, self.up6, self.up7)
        for idx, up in enumerate(ups):
            out = up(out, feats[7 - idx])
        return self.final(out)
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
    """PatchGAN discriminator over a channel-concatenated (image, condition)
    pair: four stride-2 downsampling blocks, then a single-channel score map."""

    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()

        def down_block(in_filters, out_filters, normalization=True):
            """One stride-2 conv (+ optional InstanceNorm) + LeakyReLU step."""
            steps = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalization:
                steps.append(nn.InstanceNorm2d(out_filters))
            steps.append(nn.LeakyReLU(0.2, inplace=True))
            return steps

        self.model = nn.Sequential(
            *down_block(in_channels * 2, 64, normalization=False),
            *down_block(64, 128),
            *down_block(128, 256),
            *down_block(256, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, 4, padding=1, bias=False)
        )

    def forward(self, img_A, img_B):
        # Condition on img_B by stacking it with img_A along channels.
        pair = torch.cat((img_A, img_B), 1)
        return self.model(pair)
| 4,289 | 32.515625 | 81 | py |
RG | RG-master/Semantic Segmentation/model.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True
import torch.nn.functional as F
def outS(i):
    """Map an input spatial size to the DeepLab output feature-map size.

    Mirrors the three stride-2 reductions in the backbone below (conv1,
    then the ceil_mode max-pool, then layer2):
    i -> floor((i+1)/2) -> ceil((i+1)/2) -> floor((i+1)/2).
    E.g. outS(321) == 41 and outS(513) == 65.

    Fix: the original used Python-2 `/` semantics; under Python 3 true
    division returned floats and shifted the result for even sizes, so the
    floor steps now use integer division (`//`) and the return is an int.
    """
    i = int(i)
    i = (i + 1) // 2                   # stride-2 conv1
    i = int(np.ceil((i + 1) / 2.0))    # stride-2 max-pool (ceil_mode=True)
    i = (i + 1) // 2                   # stride-2 layer2
    return i
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (BN affine flag comes from the
    module-level `affine_par`)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity when a downsample module is supplied.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """DeepLab bottleneck block: 1x1 (with stride) -> dilated 3x3 -> 1x1 (x4).

    All three BatchNorm layers have requires_grad turned off, i.e. BN
    statistics/parameters are frozen during fine-tuning. The stride is
    applied on conv1 ("# change" vs. torchvision, which strides conv2).
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # Padding equals dilation so the 3x3 conv preserves spatial size.
        padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
                               padding=padding, bias=False, dilation = dilation)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Projection shortcut when shape changes, identity otherwise.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Classifier_Module(nn.Module):
    """ASPP-style classification head: the logits of several parallel dilated
    3x3 convolutions over a 2048-channel feature map are summed."""

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            branch = nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                               padding=padding, dilation=dilation, bias=True)
            self.conv2d_list.append(branch)
        # Small-variance Gaussian init for every branch's weights.
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out = out + branch(x)
        return out
class Residual_Covolution(nn.Module):
    """Residual refinement unit: predicts a segmentation map from the input
    features and feeds a correction back into them, returning
    (refined_features, segmentation_logits)."""

    def __init__(self, icol, ocol, num_classes):
        super(Residual_Covolution, self).__init__()
        self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        reduced = self.relu(self.conv1(x))
        seg = self.conv2(reduced)
        # Map the segmentation back into feature space and add it in.
        corrected = reduced + self.relu(self.conv3(seg))
        refined = x + self.relu(self.conv4(corrected))
        return refined, seg
class Residual_Refinement_Module(nn.Module):
    """Two stacked Residual_Covolution units; returns
    [coarse_seg, coarse_seg + refinement_seg]."""

    def __init__(self, num_classes):
        super(Residual_Refinement_Module, self).__init__()
        self.RC1 = Residual_Covolution(2048, 512, num_classes)
        self.RC2 = Residual_Covolution(2048, 512, num_classes)

    def forward(self, x):
        refined_features, seg_coarse = self.RC1(x)
        _, seg_refine = self.RC2(refined_features)
        return [seg_coarse, seg_coarse + seg_refine]
class ResNet_Refine(nn.Module):
    """Dilated ResNet backbone with a Residual_Refinement_Module head.

    Stages 3/4 keep stride 1 and use dilation 2/4 (DeepLab style); the stem
    BN is frozen (requires_grad=False).

    NOTE(review): layer5 (Residual_Refinement_Module) returns a *list* of two
    tensors, but forward() passes that list straight into F.upsample — this
    likely fails at runtime; confirm against the single-tensor ResNet variant
    below.
    """
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet_Refine, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = Residual_Refinement_Module(num_classes)
        # Gaussian init for convs; BN weights/biases set to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # A projection shortcut is needed on shape change *or* when dilation
        # starts (first block of a dilated stage); its BN is frozen.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        x_size = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # Bilinearly upsample logits back to the input resolution.
        interp = F.upsample(x, x_size[2:], mode='bilinear')
        return interp
class ResNet(nn.Module):
    """Dilated ResNet backbone with an ASPP-style Classifier_Module head
    (dilations/paddings [6, 12, 18, 24]); stages 3/4 keep stride 1 with
    dilation 2/4, and the stem BN is frozen."""
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
        # Gaussian init for convs; BN weights/biases set to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # A projection shortcut is needed on shape change *or* when dilation
        # starts (first block of a dilated stage); its BN is frozen.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
        return block(dilation_series,padding_series,num_classes)
    def forward(self, x):
        x_size = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # Bilinearly upsample logits back to the input resolution.
        interp = F.upsample(x, x_size[2:], mode='bilinear')
        return interp
class MS_Deeplab(nn.Module):
    """Multi-scale DeepLab: runs the shared backbone on the input at 1.0x,
    0.75x and 0.5x scales and also returns the element-wise max fusion.

    NOTE(review): only x.size()[2] is used to size the resamplers, so square
    inputs are assumed — confirm. The nn.Upsample modules are (re)created and
    assigned to self inside forward() on every call.
    """
    def __init__(self,block,num_classes):
        super(MS_Deeplab,self).__init__()
        self.Scale = ResNet(block,[3, 4, 23, 3],num_classes)   #changed to fix #4
    def forward(self,x):
        output = self.Scale(x) # for original scale
        output_size = output.size()[2]
        input_size = x.size()[2]
        self.interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear')
        self.interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear')
        self.interp3 = nn.Upsample(size=(output_size, output_size), mode='bilinear')
        x75 = self.interp1(x)
        output75 = self.interp3(self.Scale(x75)) # for 0.75x scale
        x5 = self.interp2(x)
        output5 = self.interp3(self.Scale(x5)) # for 0.5x scale
        # Fuse by taking the per-element maximum over the three scales.
        out_max = torch.max(torch.max(output, output75), output5)
        return [output, output75, output5, out_max]
def Res_Ms_Deeplab(num_classes=21):
    """Build the multi-scale DeepLab model (ResNet-101 Bottleneck backbone)."""
    return MS_Deeplab(Bottleneck, num_classes)
def Res_Deeplab(num_classes=21, is_refine=False):
    """Build a ResNet-101 DeepLab; with is_refine=True use the residual
    refinement head instead of the ASPP classifier."""
    if is_refine:
        return ResNet_Refine(Bottleneck, [3, 4, 23, 3], num_classes)
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes)
| 11,339 | 36.549669 | 139 | py |
RG | RG-master/Semantic Segmentation/train.py | import datetime
import os
import random
import time
from math import sqrt
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
# from tensorboard import SummaryWriter
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import utils.joint_transforms as joint_transforms
import utils.transforms as extended_transforms
from datasets import voc
from models import *
from utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d
from tqdm import tqdm as tqdm
cudnn.benchmark = True
from torchvision.transforms import *
from torchvision.transforms import ToTensor, ToPILImage
ckpt_path = './ckpt'  # root directory for checkpoints and logs
exp_name = 'RSPPNET'  # experiment sub-directory under ckpt_path
# Training hyper-parameters; 'best_record' is added at runtime by main().
args = {
    'epoch_num': 200,
    'lr': 0.0001,
    'weight_decay': 0.0005,
    'momentum': 0.9,
    'lr_patience': 100,  # large patience denotes fixed lr
    'snapshot': '',  # empty string denotes learning from scratch
    'print_freq': 1,
    'val_save_to_img_file': False,
    'val_img_sample_rate': 0.1  # randomly sample some validation results to display
}
def lr_poly(base_lr, iter, max_iter=200, power=0.9):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter) ** power."""
    fraction_remaining = 1.0 - float(iter) / max_iter
    return base_lr * fraction_remaining ** power
def adjust_learning_rate(optimizer, i_iter, net, train_args):
    """Apply polynomial learning-rate decay to `optimizer` in place.

    Bug fix: the original body constructed a brand-new optimizer in a local
    variable (several variants were left commented out) and then discarded
    it, so the optimizer actually used for training never had its learning
    rate changed. Mutating `optimizer.param_groups` updates the live
    optimizer while keeping the original signature (`net` and `train_args`
    are retained for call-site compatibility).
    """
    lr = lr_poly(0.0001, i_iter)
    print('current lr:', lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
max_label = 20
def get_iou(pred, gt, max_label=20):
    """Mean intersection-over-union between two 2-D label maps.

    Args:
        pred: predicted label array, same shape as gt.
        gt: ground-truth label array.
        max_label: highest class index considered (new keyword parameter;
            default 20 matches the previous module-level constant, so
            existing two-argument calls behave identically).

    Returns:
        IoU summed over classes 0..max_label, averaged over the number of
        distinct labels present in `gt`.
    """
    if pred.shape != gt.shape:
        print('pred shape', pred.shape, 'gt shape', gt.shape)
    assert pred.shape == gt.shape
    gt = gt.astype(np.float32)
    pred = pred.astype(np.float32)
    count = np.zeros((max_label + 1,))
    for label in range(max_label + 1):
        xs = np.where(pred == label)
        pred_idx = set(zip(xs[0].tolist(), xs[1].tolist()))
        xs = np.where(gt == label)
        gt_idx = set(zip(xs[0].tolist(), xs[1].tolist()))
        intersection = set.intersection(pred_idx, gt_idx)
        union = set.union(pred_idx, gt_idx)
        # Only score classes that actually occur in the ground truth.
        if len(gt_idx) != 0:
            count[label] = float(len(intersection)) / float(len(union))
    return np.sum(count[:]) / float(len(np.unique(gt)))
def main(train_args):
    """Build the PSPNet, data pipelines and optimizer, then train/validate.

    train_args: hyper-parameter dict (see module-level `args`); gains a
    'best_record' entry tracking the best validation metrics. Resumes from
    a checkpoint when train_args['snapshot'] is non-empty — the metrics are
    parsed back out of the underscore-separated snapshot filename.
    """
    net = PSPNet(num_classes=voc.num_classes).cuda()
    if len(train_args['snapshot']) == 0:
        curr_epoch = 1
        train_args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
    else:
        print('training resumes from ' + train_args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
        # Snapshot name encodes epoch and metrics at fixed positions.
        split_snapshot = train_args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        train_args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
                                     'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
                                     'mean_iu': float(split_snapshot[9]), 'fwavacc': float(split_snapshot[11])}
    # ImageNet mean/std normalization.
    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    input_transform = standard_transforms.Compose([
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # Joint transforms keep image and mask geometrically aligned.
    joint_transform = joint_transforms.Compose([
        joint_transforms.CenterCrop(224),
        # joint_transforms.Scale(2),
        joint_transforms.RandomHorizontallyFlip(),
    ])
    target_transform = standard_transforms.Compose([
        extended_transforms.MaskToTensor(),
    ])
    restore_transform = standard_transforms.Compose([
        extended_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])
    visualize = standard_transforms.Compose([
        standard_transforms.Scale(400),
        standard_transforms.CenterCrop(400),
        standard_transforms.ToTensor()
    ])
    val_input_transform = standard_transforms.Compose([
        CenterCrop(224),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    val_target_transform = standard_transforms.Compose([
        CenterCrop(224),
        extended_transforms.MaskToTensor(),
    ])
    train_set = voc.VOC('train', transform=input_transform, target_transform=target_transform, joint_transform=joint_transform)
    train_loader = DataLoader(train_set, batch_size=4, num_workers=4, shuffle=True)
    val_set = voc.VOC('val', transform=val_input_transform, target_transform=val_target_transform)
    val_loader = DataLoader(val_set, batch_size=4, num_workers=4, shuffle=False)
    # criterion = CrossEntropyLoss2d(size_average=True, ignore_index=voc.ignore_label).cuda()
    criterion = torch.nn.CrossEntropyLoss(ignore_index=voc.ignore_label).cuda()
    optimizer = optim.SGD(net.parameters(),lr=train_args['lr'], momentum=train_args['momentum'],weight_decay=train_args['weight_decay'])
    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    # open(os.path.join(ckpt_path, exp_name, 'loss_001_aux_SGD_momentum_95_random_lr_001.txt'), 'w').write(str(train_args) + '\n\n')
    for epoch in range(curr_epoch, train_args['epoch_num'] + 1):
        # adjust_learning_rate(optimizer,epoch,net,train_args)
        train(train_loader, net, criterion, optimizer, epoch, train_args)
        validate(val_loader, net, criterion, optimizer, epoch, train_args, restore_transform, visualize)
        adjust_learning_rate(optimizer,epoch,net,train_args)
        # scheduler.step(val_loss)
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    """Train one epoch of PSPNet with the Random-Gradient experiment.

    With probability 0.5 the combined (main + 0.4 * auxiliary) loss is
    additionally scaled by a fresh uniform random factor before backprop;
    otherwise the plain combined loss is used. Loss averages are logged
    every train_args['print_freq'] iterations.

    NOTE(review): `loss.data[0]` is pre-0.4 PyTorch indexing — newer
    versions require `loss.item()`; confirm the intended torch version.
    """
    # interp = nn.Upsample(size=256, mode='bilinear')
    net.train()
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()
        # Coin flip decides whether this step uses the random-gradient scale.
        random_number = random.random()
        if random_number > 0.5:
            optimizer.zero_grad()
            outputs,aux_logits = net(inputs)
            assert outputs.size()[2:] == labels.size()[1:]
            assert outputs.size()[1] == voc.num_classes
            loss_1 = criterion(outputs, labels)
            loss_2 = criterion(aux_logits, labels)
            # Random Gradient: scale the combined loss by a second U(0,1) draw.
            loss = (loss_1 + 0.4*loss_2)*random.random()
            loss.backward()
            optimizer.step()
            train_loss.update(loss.data[0], N)
        else:
            optimizer.zero_grad()
            outputs,aux_logits = net(inputs)
            assert outputs.size()[2:] == labels.size()[1:]
            assert outputs.size()[1] == voc.num_classes
            loss_1 = criterion(outputs, labels)
            loss_2 = criterion(aux_logits, labels)
            loss = loss_1 + 0.4*loss_2
            loss.backward()
            optimizer.step()
            train_loss.update(loss.data[0], N)
        curr_iter += 1
        # writer.add_scalar('train_loss', train_loss.avg, curr_iter)
        if i % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f],[N: %d]' % (
                epoch, i + 1, len(train_loader), train_loss.avg, N
                # , loss_1.data[0], loss_2.data[0],[loss %.3f],[loss2 %.3f]
            ))
def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
    """Evaluate on the validation set; log metrics and track the best mIoU.

    Appends '<epoch>_<mean_iu>,' to a log file whenever mean IoU improves
    on train_args['best_record'] (the checkpoint save is commented out).
    `restore`/`visualize` transforms are currently unused here.

    NOTE(review): `volatile=True` and `.data[0]` are pre-0.4 PyTorch APIs —
    newer versions use torch.no_grad() and .item(); confirm torch version.
    """
    net.eval()
    global best_acc
    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []
    for vi, data in tqdm(enumerate(val_loader)):
        inputs, gts = data
        N = inputs.size(0)
        inputs = Variable(inputs, volatile=True).cuda()
        gts = Variable(gts, volatile=True).cuda()
        outputs = net(inputs)
        # interp = nn.Upsample(size=256, mode='bilinear')
        # outputs = interp(net(inputs))
        # Hard class predictions: argmax over the channel dimension.
        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
        val_loss.update(criterion(outputs, gts).data[0], N)
        # if random.random() > train_args['val_img_sample_rate']:
        #     inputs_all.append(None)
        # else:
        #     inputs_all.append(inputs.data.squeeze_(0).cpu())
        gts_all.append(gts.data.squeeze_(0).cpu().numpy())
        predictions_all.append(predictions)
        # IOU.append(get_iou(outputs,gts))
    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, voc.num_classes)
    if mean_iu > train_args['best_record']['mean_iu']:
        train_args['best_record']['val_loss'] = val_loss.avg
        train_args['best_record']['epoch'] = epoch
        train_args['best_record']['acc'] = acc
        train_args['best_record']['acc_cls'] = acc_cls
        train_args['best_record']['mean_iu'] = mean_iu
        train_args['best_record']['fwavacc'] = fwavacc
        snapshot_name = 'epoch_%d_loss_%.5f_acc_%.5f_acc-cls_%.5f_mean-iu_%.5f_fwavacc_%.5f' % (
            epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc
        )
        open(os.path.join(ckpt_path, exp_name, 'loss_0001_dilation_aux_SGD_momentum_090_PSPNet_L3.txt'), 'a').write(str(epoch) + '_' + str(mean_iu) + ',')
        # torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, snapshot_name + '.pth'))
    print('--------------------------------------------------------------------')
    print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
        epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))
    print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
        train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
        train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))
    print('--------------------------------------------------------------------')
if __name__ == '__main__':
    # Train/validate using the module-level hyper-parameter dict.
    main(args)
| 9,955 | 38.19685 | 219 | py |
metacorps-nn | metacorps-nn-master/different_layers_experiment.py | import sys
import pandas as pd
from modelrun import ModelRun
# Experiment: vary network DEPTH using progressively smaller hidden layers
# drawn from ``layer_sizes`` (NN1 = [500], NN2 = [500, 300], ...), evaluate
# each configuration, and write a LaTeX results table.
verbose = True

# Used to build data frame and latex table.
rows = []

w2v_model_loc = 'GoogleNews-vectors-negative300.bin'

# BUGFIX: learning_rate was only assigned in the CLI branch, so running
# without arguments crashed with NameError; sys.argv[2] was also read
# after checking for only ONE extra argument (IndexError with one arg).
if len(sys.argv) > 2:
    run_directory = sys.argv[1]
    learning_rate = float(sys.argv[2])
else:
    run_directory = 'test_hyperparam_test'
    learning_rate = 0.01  # ModelRun's default

# Keeping a ModelRun allows us to not have to re-load GoogleNews model.
mr = ModelRun(run_directory=run_directory,
              limit_word2vec=300000,
              w2v_model_loc=w2v_model_loc,
              learning_rate=learning_rate)

# Each architecture is a prefix of length idx of these layer sizes.
layer_sizes = [500, 300, 150, 100, 50]
for idx in range(1, len(layer_sizes) + 1):
    mr.n_hidden = layer_sizes[:idx]
    if verbose:
        print('Running with mr.n_hidden =', mr.n_hidden)

    ev = mr.run(n_epochs=120, verbose=verbose, early_stopping_limit=25)

    rows.append([
        'NN{}'.format(len(mr.n_hidden)),
        ev.sensitivity,
        ev.specificity,
        ev.precision,
        ev.auc
    ])

modelruns_table = pd.DataFrame(
    columns=['Method', 'Sens.', 'Spec.', 'Prec.', 'AUC'],
    data=rows
)
with open('PerformanceTable.tex', 'w') as f:
    modelruns_table.to_latex(f, float_format='%0.3f')
| 1,460 | 28.22 | 105 | py |
metacorps-nn | metacorps-nn-master/test_util.py | from util import get_window
from nose.tools import eq_
def test_get_window():
    """get_window() should always return an 11-token window (5 context
    tokens either side of the focal token), shifting the window when the
    focal token sits near an edge and padding short texts with ''."""
    cases = [
        # Enough room on both sides for a full window.
        ('he has to go on the attack if he wants to win the debate',
         'attack',
         ['has', 'to', 'go', 'on', 'the', 'attack',
          'if', 'he', 'wants', 'to', 'win']),
        # Focal token too close to the start: window extends rightward.
        ('i think romney beat obama in the first debate but obama won the last two',
         'beat',
         ['i', 'think', 'romney', 'beat', 'obama', 'in', 'the',
          'first', 'debate', 'but', 'obama']),
        # Focal token too close to the end: window extends leftward.
        ('although the i think youre right its important to consider the beating taken by romney',
         'beat',
         ['youre', 'right', 'its', 'important', 'to', 'consider',
          'the', 'beating', 'taken', 'by', 'romney']),
        # Text shorter than the window: padded with empty strings.
        ('he beat his opponent',
         'beat',
         ['he', 'beat', 'his', 'opponent', '', '', '', '', '', '', '']),
    ]
    for text, focal, expected in cases:
        eq_(get_window(text, focal, 5), expected)
| 1,256 | 38.28125 | 99 | py |
metacorps-nn | metacorps-nn-master/modelrun.py | '''
'''
from uuid import uuid4
# Command-line interface: read it CLIck.
import click
import numpy as np
import os
import tensorflow as tf
# See https://radimrehurek.com/gensim/models/keyedvectors.html
import gensim
from eval import Eval
from model import train_network
from util import MetaphorData
# WORKFLOW
# 1. Run ./modelrun with options for how many layers, what else?
# 1. Confirm that X number of simulations will be run.
#
# CLICK EXAMPLE TO MODIFY:
@click.command()
@click.option('--count', default=1, help='Number of greetings.')
@click.option('--name', prompt='Your name', help='The person to greet.')
def hello(count, name):
    """Leftover click demo: print a greeting for ``name``, ``count`` times."""
    for _ in range(count):
        click.echo('Hello %s!' % name)
class ModelRun:
    '''
    Neural network modeling harness to be used in grid searching and general
    evaluation.  Loads the word2vec embeddings once at construction so
    repeated calls to ``run()`` (e.g. during a grid search) do not pay the
    load cost again.  Each ``run()`` returns an eval.Eval for comparison
    with other models.
    '''
    def __init__(self,
                 labelled_data_loc='viomet-2012.csv',
                 w2v_model_loc='/data/GoogleNews-vectors-negative300.bin',
                 n_hidden=[300, 150],
                 train_ratio=0.8,
                 validation_ratio=0.1,
                 learning_rate=0.01,
                 batch_size=40,
                 activation=tf.nn.relu,
                 run_directory=str(uuid4()),
                 limit_word2vec=False):
        '''
        Arguments:
            labelled_data_loc (str): CSV of labelled metaphor snippets
            w2v_model_loc (str): path to the word2vec binary; see note
                below about the 'GoogleNews' check
            n_hidden (list(int)): hidden-layer sizes, one entry per layer
            train_ratio (float): fraction of rows used for training
            validation_ratio (float): fraction of the training rows held
                out for validation / early stopping
            learning_rate (float): optimizer learning rate
            batch_size (int): minibatch size
            activation: TensorFlow activation function for hidden layers
            run_directory (str): subdirectory of ``modelruns`` used for
                checkpoints; a random UUID by default
            limit_word2vec (bool/int): number of embeddings to load from the
                model loaded from `w2v_model_loc`; false if no limit

        NOTE(review): the defaults ``n_hidden=[300, 150]`` and
        ``run_directory=str(uuid4())`` are evaluated once at import time,
        so all instances relying on them share one list / one directory
        name -- confirm that is intended.
        '''
        self.labelled_data_loc = labelled_data_loc
        self.n_hidden = n_hidden
        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.activation = activation
        self.run_directory = run_directory

        # try:
        # NOTE(review): if w2v_model_loc does not contain 'GoogleNews',
        # self.w2v_model is never assigned and the MetaphorData call below
        # raises AttributeError -- confirm whether other embedding files
        # were meant to be supported.
        if 'GoogleNews' in w2v_model_loc:
            print(
                'loading GoogleNews word2vec embeddings, takes a minute...'
            )
            if limit_word2vec:
                # Load only the first `limit_word2vec` vectors to save
                # memory (gensim loads them in frequency order).
                self.w2v_model = \
                    gensim.models.KeyedVectors.load_word2vec_format(
                        w2v_model_loc, binary=True, limit=limit_word2vec
                    )
            else:
                self.w2v_model = \
                    gensim.models.KeyedVectors.load_word2vec_format(
                        w2v_model_loc, binary=True
                    )
        # except Exception as e:
        #     print(
        #         '\n****\nDownload Google News word2vec embeddings from '
        #         'https://goo.gl/WdCunP to your /data/ directory, dork!\n****\n'
        #     )
        #     # print(e.message())
        #     return None

        # Wrap the labelled CSV and immediately produce an initial
        # train/test split.
        self.metaphors = MetaphorData(
            labelled_data_loc, self.w2v_model, train_ratio=train_ratio,
            validation_ratio=validation_ratio
        )
        self.train, self.test = self.metaphors.split_train_test()

        # All checkpoints live under modelruns/<run_directory>.
        if not os.path.isdir('modelruns'):
            os.mkdir('modelruns')

    def run(self,
            n_epochs=20,
            rebuild_metaphors=True,
            early_stopping_limit=10,
            verbose=True):
        '''
        Train the network once and evaluate it on the held-out test set.

        Arguments:
            n_epochs (int): maximum number of training epochs
            rebuild_metaphors (bool): if True (default), draw a fresh
                train/validation/test split before training
            early_stopping_limit (int): epochs without a validation
                improvement before training stops
            verbose (bool): print per-epoch progress

        Returns:
            (eval.Eval): evaluation of predictions on the test split
        '''
        # By default the training and testing data is split for every new run.
        if rebuild_metaphors:
            self.metaphors = MetaphorData(
                self.labelled_data_loc, self.w2v_model,
                train_ratio=self.train_ratio,
                validation_ratio=self.validation_ratio
            )
            self.train, self.test = self.metaphors.split_train_test()

        # Build checkpoint name based on parameters.
        checkpoint_name = os.path.join(
            'modelruns', self.run_directory,
            '-'.join(str(n) for n in self.n_hidden)
        )
        checkpoint_name += '-{}'.format(self.train_ratio)
        checkpoint_name += '-{}'.format(self.validation_ratio)
        checkpoint_name += '-{}'.format(self.learning_rate)
        checkpoint_name += '-{}'.format(self.activation.__name__)

        # Run nn training.
        X, probabilities, logits = train_network(
            self.w2v_model, self.train,
            checkpoint_name,
            n_epochs=n_epochs,
            n_hidden=self.n_hidden,
            batch_size=self.batch_size,
            learning_rate=self.learning_rate,
            early_stopping_limit=early_stopping_limit,
            verbose=verbose
        )

        # Standard save and reload, but TODO is it necessary? Seems sess could
        # be returned as well from train_network.
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # Restore the best (early-stopped) parameters and run inference
            # on the whole test set in one feed.
            saver.restore(sess, checkpoint_name)
            # Z = logits.eval(feed_dict={X: self.test.embedded_sentences})
            Z, probabilities = sess.run(
                [logits, probabilities],
                feed_dict={X: self.test.embedded_sentences}
            )
            # Hard class prediction = argmax over the two logits.
            y_pred = np.argmax(Z, axis=1)

        self.test.add_predictions(y_pred, probabilities)

        return Eval(self.test)
if __name__ == '__main__':
hello()
| 5,085 | 31.602564 | 81 | py |
metacorps-nn | metacorps-nn-master/model.py | '''
Following Do Dinh, E.-L., & Gurevych, I. (2016) using TensorFlow.
Do Dinh, E.-L., & Gurevych, I. (2016). Token-Level Metaphor
Detection using Neural Networks. Proceedings of the Fourth Workshop on
Metaphor in NLP, (June), 28–33.
Author: Matthew A. Turner
Date: 2017-12-11
'''
import tensorflow as tf
def train_network(w2v_model, training_data, model_save_path, n_outputs=2,
                  n_hidden=[300], context_window=5, learning_rate=1.5,
                  activation=tf.nn.relu, use_dropout=True, dropout_rate=0.5,
                  input_dropout_rate=0.8, n_epochs=40, batch_size=50,
                  early_stopping_limit=10, verbose=True):
    '''
    Build and train a feed-forward classifier (TensorFlow 1.x graph API)
    on flattened word2vec context windows, with dropout and early stopping
    on validation accuracy.  The best-so-far parameters are checkpointed
    to ``model_save_path``.

    Arguments:
        w2v_model (gensim.models.word2vec): Gensim wrapper of the word2vec
            model we're using for this training
        training_data (util.MetaphorDataTrain): training examples plus the
            validation split used for early stopping
        model_save_path (str): location to save model .ckpt file
        n_outputs (int): number of output classes (metaphor / not)
        n_hidden (list(int)): number of nodes per hidden layer; number of
            elements in list is the number of layers
        context_window (int): number of words before and after focal token
            to consider
        learning_rate (float): learning rate for the momentum optimizer
        activation: activation function applied to each hidden layer
        use_dropout (bool): apply dropout to the input and hidden layers
        dropout_rate (float): dropout rate for hidden layers
        input_dropout_rate (float): dropout rate for the input layer
        n_epochs (int): maximum number of epochs
        batch_size (int): minibatch size
        early_stopping_limit (int): epochs without a validation-accuracy
            improvement before training stops
        verbose (bool): print progress each epoch

    Returns:
        (X, probabilities, logits): the input placeholder and output
        tensors, so callers can restore the checkpoint and run inference
    '''
    # Reset for interactive work?
    tf.reset_default_graph()

    # Total entries in potential metaphor phrase embedding.
    n_sent_tokens = (2 * context_window) + 1
    n_inputs = n_sent_tokens * w2v_model.vector_size

    # Shape of None allows us to pass all of the batch in at once with a
    # variable batch_size.
    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
    y = tf.placeholder(tf.int64, shape=(None), name='y')
    # `training` toggles dropout on (True) during optimization and off
    # (default False) during evaluation feeds.
    training = tf.placeholder_with_default(False, shape=(), name='training')
    if use_dropout:
        X_drop = tf.layers.dropout(X, input_dropout_rate, training=training)

    # Track the previous layer to connect the next layer.
    prev_layer = None
    with tf.name_scope('dnn'):
        for idx, n in enumerate(n_hidden):
            name = 'hidden' + str(idx)
            if idx == 0:
                # First hidden layer reads the (possibly dropped-out) input.
                if use_dropout:
                    X_ = X_drop
                else:
                    X_ = X
                prev_layer = tf.layers.dense(
                    X_, n, name=name, activation=activation
                )
            else:
                prev_layer = tf.layers.dense(
                    prev_layer, n, name=name, activation=activation
                )
            if use_dropout:
                do_name = 'dropout' + str(idx)
                prev_layer = tf.layers.dropout(
                    prev_layer, dropout_rate, training=training, name=do_name
                )

    logits = tf.layers.dense(prev_layer, n_outputs, name='outputs')

    # Currently this is coming from the TF book Ch 10.
    with tf.name_scope('loss'):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits
        )
        loss = tf.reduce_mean(xentropy, name='loss')

    with tf.name_scope('train'):
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
        training_op = optimizer.minimize(loss)

    with tf.name_scope('eval'):
        # Accuracy = fraction of rows whose true label is the top logit.
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        probabilities = tf.nn.softmax(logits, name="softmax_tensor")

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    X_validate, y_validate = training_data.validation()
    with tf.Session() as sess:
        init.run()

        # Initialize early stopping parameters.
        acc_val_best = -1.0  # So initial accuracy always better.
        n_since_winner = 0
        for epoch in range(n_epochs):
            training_data.shuffle()
            for iteration in range(training_data.num_examples // batch_size):
                X_batch, y_batch = training_data.next_batch(batch_size)
                sess.run(
                    training_op,
                    feed_dict={X: X_batch, y: y_batch, training: True}
                )
            # Note: train accuracy is measured on the LAST minibatch only.
            acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
            acc_val = accuracy.eval(feed_dict={X: X_validate, y: y_validate})

            if acc_val > acc_val_best:
                # New best validation accuracy: checkpoint and reset the
                # early-stopping counter.
                n_since_winner = 0
                acc_val_best = acc_val
                saver.save(sess, model_save_path)
                if verbose:
                    print('Have a new winner: acc_val_best=', acc_val_best)
            else:
                n_since_winner += 1
                if verbose:
                    print(n_since_winner, " since winner")

            if n_since_winner > early_stopping_limit:
                break

            if verbose:
                print(
                    epoch, 'Train accuracy: ', acc_train,
                    ' Validation accuracy: ', acc_val
                )

    # saver.save(sess, model_save_path)

    return X, probabilities, logits
| 5,240 | 35.908451 | 77 | py |
metacorps-nn | metacorps-nn-master/util.py | '''
Utilities for training a neural network for automated identification of
metaphorical violence.
Author: Matthew A. Turner
Date: 2017-11-21
'''
import itertools
import numpy as np
import pandas as pd
import random
import warnings
def get_window(text, focal_token, window_size):
    '''
    Given some text and a token of interest, generate a list representing the
    context window that will be classified metaphor or not.

    Arguments:
        text (str): input text from the training or test dataset
        focal_token (str): token to be determined metaphor or not
        window_size (int): half the number of context tokens; ideally this
            will be the number of tokens on either side of the focal_token,
            however the window will adapt if there are not enough words
            to either side of the focal_token

    Returns:
        (list) context tokens and focal_token, always of length
        (2 * window_size) + 1 (padded with '' when the text is short);
        None when focal_token does not occur in text
    '''
    tokens = text.split()
    n_tokens = len(tokens)

    # By default we don't need to enlarge the right window, but we might.
    enlarge_right = False

    # There must be at least 2*window_size + 1 tokens in order to make the
    # window list; pad short texts with empty strings.
    full_window_len = (2 * window_size) + 1
    if n_tokens < full_window_len:
        tokens.extend('' for _ in range(full_window_len - n_tokens))
        assert len(tokens) == full_window_len
        return tokens

    # BUGFIX: next() raises StopIteration (not ValueError) when no token
    # matches, so the old `except ValueError` never fired; also
    # `e.message` does not exist on Python 3 exceptions.
    try:
        focal_token_idx = next(idx for idx, token in enumerate(tokens)
                               if focal_token in token)  # handle beaten, etc.
    except StopIteration:
        warnings.warn(
            'focal token {!r} not found in text'.format(focal_token)
        )
        return None

    # Handle possibility that the focal token doesn't have window_size
    # number of words ahead of it to use in the window.
    left_padding = focal_token_idx
    if left_padding < window_size:
        left_window_size = left_padding
        enlarge_right = True
    else:
        # BUGFIX: was hard-coded 5; honor the window_size argument.
        left_window_size = window_size

    # Number of tokens following focal_token in tokens.
    right_padding = n_tokens - focal_token_idx - 1
    if right_padding < window_size:
        right_window_size = right_padding
        # Subtract 2: one for the focal_token and one for zero indexing.
        left_window_size = window_size + (n_tokens - focal_token_idx) - 2
    elif enlarge_right:
        right_window_size = (window_size * 2) - left_padding
    else:
        # BUGFIX: was hard-coded 5; honor the window_size argument.
        right_window_size = window_size

    left_idx = focal_token_idx - left_window_size
    right_idx = focal_token_idx + right_window_size + 1

    ret = tokens[left_idx:right_idx]

    # Defensive fix-ups so callers always receive a fixed-length window.
    if len(ret) < full_window_len:
        ret.extend('' for _ in range(full_window_len - len(ret)))
    elif len(ret) > full_window_len:
        ret = ret[:full_window_len]

    return ret
def _make_sentence_embedding(sentence, focal_token, wvmodel, window_size):
    '''
    Vectorize the context window around ``focal_token`` with the word2vec
    model.  The window is produced by get_window (window_size tokens either
    side of the focal token); out-of-vocabulary tokens (including the ''
    padding) map to all-zero vectors.

    Arguments:
        sentence (str): sentence that is either a metaphor or not
        focal_token (str): the word that instantiates the source domain,
            e.g. attack
        wvmodel (gensim.models.Word2Vec): vector space model of words
        window_size (int): ideal number of words before and after
            focal_token

    Returns:
        (numpy.ndarray): 1-D array, the per-word embeddings concatenated
        in sentence order (flattened, MNIST-style)
    '''
    dim = wvmodel.vector_size
    rows = []
    for token in get_window(sentence, focal_token, window_size):
        if token in wvmodel.wv:
            rows.append(wvmodel.wv[token])
        else:
            rows.append(np.zeros(shape=(dim)))
    return np.array(rows).flatten()
class MetaphorData:
    # Loads the labelled metaphor CSV and produces train/validation/test
    # splits of (sentence embedding, label) pairs.

    def __init__(self, data_path, w2v_model, train_ratio=0.8,
                 validation_ratio=0.1, window_size=5):
        '''
        Load labelled metaphor snippets from a .csv file. Provides methods for
        creating batches of training/test data of embeddings from rows of
        the .csv.

        Arguments:
            data_path (str): location of .csv on disk
            w2v_model (gensim.Word2VecModel): embeddings used to vectorize
                each context window
            train_ratio (float): fraction of rows drawn for training
            validation_ratio (float): fraction of the training draw held
                out for validation
            window_size (int): context words on each side of the focal word
        '''
        self.data_frame = pd.read_csv(data_path)

        # Used to generate sentence embeddings of text to classify.
        self.wv = w2v_model

        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio
        # NOTE(review): test_ratio is recorded but never read; the test set
        # is simply whatever rows remain after the train/validation draw.
        self.test_ratio = 1 - train_ratio - validation_ratio

        self.window_size = window_size

    def split_train_test(self, **attr_updates):
        '''
        Create a fresh set of training/test data using current attributes as
        parameters.

        Keyword arguments overwrite instance attributes (e.g. train_ratio)
        before the split is drawn.

        Returns:
            (MetaphorDataTrain, MetaphorDataTest)
        '''
        for key in attr_updates:
            setattr(self, key, attr_updates[key])

        _train, _validation, _test = self._split()
        num_train = len(_train)
        # num_validation = len(_validation)
        num_test = len(_test)

        # Build training and validation embeddings.
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 0.25;
        # this module requires an older pandas (or a port to .to_numpy()).
        train_embeddings = (
            _make_sentence_embedding(row[0], row[1], self.wv, self.window_size)
            for row in _train[['text', 'word']].as_matrix()
        )
        validation_embeddings = (
            _make_sentence_embedding(row[0], row[1], self.wv, self.window_size)
            for row in _validation[['text', 'word']].as_matrix()
        )

        # Wrap training and validation embeddings and their labels with
        # MetaphorDataTrain class.
        self.train = MetaphorDataTrain(
            train_embeddings, _train.is_metaphor.as_matrix(), num_train,
            (validation_embeddings, _validation.is_metaphor)
        )

        # Create the test sentence embeddings.
        test_embeddings = (
            _make_sentence_embedding(row[0], row[1], self.wv, self.window_size)
            for row in _test[['text', 'word']].as_matrix()
        )

        # Initialize the test data object.
        self.test = MetaphorDataTest(
            test_embeddings, _test.is_metaphor.as_matrix(), num_test,
        )
        # Add information about the original sentences, words, and labels.
        self.test.add_original(_test)

        return self.train, self.test

    def _split(self, random_seed=42):
        # Draw train/validation/test index sets and oversample the metaphor
        # class (with replacement) so the training set is balanced.
        # NOTE(review): random_seed only seeds the oversampling draw below;
        # the initial train/validation selection is NOT seeded, so splits
        # vary between calls -- confirm that is intended.
        df = self.data_frame
        n_rows = len(df)

        # n_test is implicitly set when we take set differences below.
        n_train = int(n_rows * self.train_ratio)

        train_indexes = np.random.choice(
            range(n_rows), n_train, replace=False
        )

        # Validation rows are carved off the tail of the training draw.
        n_validation = int(n_train * self.validation_ratio)
        validation_indexes = train_indexes[-n_validation:]
        train_indexes = train_indexes[:-n_validation]

        # Count number of metaphors in training selection.
        train_df = df.iloc[train_indexes]

        # Should be int but this doesn't hurt.
        n_metaphor = int(df.is_metaphor.sum())

        # Need to sample with replacement to build balanced training dataset.
        # NOTE(review): assumes metaphors are the minority class; if
        # 2 * n_metaphor exceeds n_train this count is negative -- confirm.
        n_to_sample = n_train - (2 * n_metaphor)

        metaphor_rows = df[df.is_metaphor == 1]
        if random_seed is not None:
            np.random.seed(random_seed)
        sample_indexes = np.random.choice(range(n_metaphor), n_to_sample)
        # NOTE(review): reset_index() is not in-place; as written this call
        # has no effect.
        metaphor_rows.reset_index()
        train_df = train_df.append(
            metaphor_rows.iloc[sample_indexes],
            ignore_index=True
        )

        # This will be random order, length = len(n_rows) - len(n_train).
        test_indexes = list(
            set(self.data_frame.index)
            - set(train_indexes)
            - set(validation_indexes)
        )

        return (
            train_df,
            df.iloc[validation_indexes],
            df.iloc[test_indexes]
        )
class MetaphorDataTrain:
    # Holds embedded training examples and serves shuffled minibatches by
    # slicing two parallel infinite cycles (embeddings and labels).

    def __init__(self, embedded_sentences, is_metaphor_vec, num_examples,
                 validation_set=None):
        '''
        Wrap metaphor training data to generate new batches for training and
        validation.

        Arguments:
            embedded_sentences (iter): iterator of 2D matrices representing
                sentences from the corpus
            is_metaphor_vec (pandas.Series or numpy.ndarray): 1D sequence of
                labels corresponding to each embedded sentence; 1 for
                metaphor and 0 is not metaphor
            num_examples (int): total number of training examples
            validation_set (tuple): two-tuple of a list or array of
                sentence embeddings in first position and corresponding
                list or array of 1/0 metaphor/not labels in the second position
        '''
        # Embedded sentences are of dimension window_size * embedding_dim.
        # They will be cycled over each epoch.
        self.num_examples = num_examples

        # Step sizes that will not result in repeated selections of
        # training examples in islice below, i.e. is relative prime of the
        # total number of examples we have. Used to effectively shuffle
        # training examples (see shuffle method below).
        # NOTE(review): the filter keeps non-divisors of num_examples,
        # which is weaker than "relatively prime" -- confirm which
        # property was intended.
        self.suitable_start = [
            i for i in range(1, num_examples) if num_examples % i != 0
        ]

        self.is_metaphor_vec = np.array(list(is_metaphor_vec))
        self.embedded_sentences = np.array(list(embedded_sentences))

        # Infinite iterators so batches wrap around past the last example.
        self.embedded_sentences_cycle = \
            itertools.cycle(self.embedded_sentences)
        self.is_metaphor_cycle = itertools.cycle(self.is_metaphor_vec)

        if validation_set is not None:
            self.validation_embeddings = validation_set[0]
            self.validation_labels = validation_set[1]
            self.validation_ = (
                np.array(list(self.validation_embeddings)),
                self.validation_labels
            )

        # Offset into the cycles; re-randomized by shuffle().
        self.start = 0

    def next_batch(self, batch_size):
        # Return (embeddings, labels) for the next batch, in a random
        # within-batch order.
        # NOTE(review): islice on a cycle consumes start + batch_size items
        # per call, so `start` examples are skipped between batches --
        # confirm this is the intended shuffling scheme.
        # Randomize in-batch order.
        sel_idx = np.random.permutation(batch_size)

        embed_batch = np.array(list(
            itertools.islice(
                self.embedded_sentences_cycle, self.start,
                self.start + batch_size
            )
        ))[sel_idx]
        is_metaphor_batch = np.array(list(
            itertools.islice(
                self.is_metaphor_cycle, self.start,
                self.start + batch_size
            )
        ))[sel_idx]

        return embed_batch, is_metaphor_batch

    def validation(self):
        # (embeddings array, labels) held-out validation tuple.
        return self.validation_

    def shuffle(self):
        # Jump to a new random offset in the example cycles.
        self.start = random.choice(self.suitable_start)
class MetaphorDataTest(MetaphorDataTrain):
    """Test-set variant of MetaphorDataTrain that additionally carries the
    original test rows (via add_original) and, after inference, the model's
    predictions (via add_predictions)."""

    def __init__(self, embedded_test_sentences, is_metaphor_vec, num_examples):
        super().__init__(
            embedded_test_sentences, is_metaphor_vec, num_examples
        )
        # Populated later by add_original() / add_predictions().
        self.text_sentences = None
        self.predicted_is_metaphor_vec = None

    def add_original(self, test_df):
        """Attach the original test DataFrame (same row order as the test
        embeddings and labels) so evaluations can inspect raw text.

        Arguments:
            test_df (pandas.DataFrame): table with original sentences and
                other metadata.

        Returns:
            None
        """
        self.test_df = test_df
        self.text_sentences = test_df['text']
        self.word = test_df['word']

    def add_predictions(self, predicted_is_metaphor_vec, probabilities):
        """Record the model's hard predictions and class probabilities."""
        self.probabilities = probabilities
        self.predicted_is_metaphor_vec = predicted_is_metaphor_vec
| 11,959 | 34.176471 | 79 | py |
metacorps-nn | metacorps-nn-master/prepare_csv_input.py | '''
Export script to create tabular dataset from the metacorps web app's
MongoDB database. A mongodump of this database is available at
http://metacorps.io/static/data/nov-15-2017-metacorps-dump.zip (594M)
'''
import numpy as np
import pandas as pd
from nltk.tokenize import RegexpTokenizer
from pymongo import MongoClient
CLIENT = MongoClient()
def make_csv(project_name='Viomet Sep-Nov 2012',
             output_path='viomet-2012.csv',
             random_seed=42):
    '''
    Export labelled metaphor instances from the metacorps MongoDB into a
    flat CSV with columns word, reference_url, text, is_metaphor.

    Arguments:
        project_name (str): name of the metacorps project to export
        output_path (str): where the CSV is written
        random_seed (int): unused; retained for backward compatibility
            with an earlier version that resampled rows here (balancing
            now happens downstream)

    Returns:
        (pandas.DataFrame): the table that was written to output_path
    '''
    # BUGFIX: the query previously hard-coded 'Viomet Sep-Nov 2012' and
    # silently ignored the project_name argument.  Also use the builtin
    # next() instead of the removed py2-style Cursor.next().
    project = next(CLIENT.metacorps.project.find({'name': project_name}))

    facets = project['facets']
    facet_docs = [
        next(CLIENT.metacorps.facet.find({'_id': id_}))
        # the first three facets are hit, attack, and beat
        for id_ in facets[:3]
    ]

    tokenizer = RegexpTokenizer(r'\w+')

    def _preprocess(text):
        'Lowercase, strip punctuation, and normalize split contractions.'
        text = text.lower()
        text = ' '.join(tokenizer.tokenize(text))
        return _replacements(text)

    # One row per annotated instance in the selected facets.
    text_metaphor_rows = [
        (
            doc['word'],
            instance['reference_url'],
            _preprocess(instance['text']),
            int(instance['figurative'])
        )
        for doc in facet_docs
        for instance in doc['instances']
    ]

    # create tabular format of data
    data = pd.DataFrame(
        data=text_metaphor_rows,
        columns=['word', 'reference_url', 'text', 'is_metaphor']
    )
    data.to_csv(output_path, index=False)

    return data
def _replacements(text):
text = text.replace('i m ', 'im ')
text = text.replace('dont t ', 'dont ')
text = text.replace('u s ', 'U.S. ')
text = text.replace('it s ', 'it\'s ')
text = text.replace('we re ', 'we\'re ')
text = text.replace('they re ', 'they\'re ')
text = text.replace('can t ', 'can\'t ')
text = text.replace('palins', 'palin')
return text
| 2,717 | 26.18 | 77 | py |
metacorps-nn | metacorps-nn-master/eval.py | '''
Code to evaluate a particular trained network. Think about formatting results
here well to be tables in the paper.
'''
import sklearn.metrics as skmetrics
from collections import Counter
from util import get_window
class Eval:
    '''
    Methods to evaluate different models.

    On construction, partitions the test rows by (predicted, actual)
    label, builds a confusion matrix, and computes the standard derived
    scores: accuracy, precision, sensitivity, specificity, and ROC AUC.
    '''
    def __init__(self, test_data):
        '''
        Arguments:
            test_data (util.MetaphorDataTest): test data; must already
                have predictions attached via add_predictions()
        '''
        self.test_data = test_data
        self.test_df = test_data.test_df
        self.test_sentences = self.test_data.text_sentences

        # Partition rows by prediction outcome for qualitative inspection.
        self.false_negatives = self.test_df[['text', 'word']][
            (self.test_data.predicted_is_metaphor_vec == 0) &
            (self.test_data.is_metaphor_vec == 1)
        ]
        self.false_positives = self.test_df[['text', 'word']][
            (self.test_data.predicted_is_metaphor_vec == 1) &
            (self.test_data.is_metaphor_vec == 0)
        ]
        self.true_negatives = self.test_df[['text', 'word']][
            (self.test_data.predicted_is_metaphor_vec == 0) &
            (self.test_data.is_metaphor_vec == 0)
        ]
        self.true_positives = self.test_df[['text', 'word']][
            (self.test_data.predicted_is_metaphor_vec == 1) &
            (self.test_data.is_metaphor_vec == 1)
        ]

        self.true = self.test_data.is_metaphor_vec
        self.pred = self.test_data.predicted_is_metaphor_vec

        # Compute performance measures.
        # http://scikit-learn.org/stable/modules/model_evaluation.html
        self.confusion_matrix = \
            skmetrics.confusion_matrix(self.true, self.pred)
        tn, fp, fn, tp = self.confusion_matrix.ravel()
        self.accuracy = (tp + tn) / (tp + tn + fp + fn)
        self.precision = tp / (tp + fp)  # how often a positive is true pos.
        self.sensitivity = tp / (tp + fn)  # recall, true positive rate
        self.specificity = tn / (tn + fp)  # true negative rate
        # NOTE(review): assumes column 1 of `probabilities` is the positive
        # (metaphor) class score -- matches the softmax output ordering
        # used upstream; confirm if that changes.
        self.auc = skmetrics.roc_auc_score(
            self.true, self.test_data.probabilities[:, 1]
        )

    def word_counts(self, n_most_common=10):
        '''
        For both the false negatives and false positives, create word counts

        Arguments:
            n_most_common (int): how many top words to keep per category

        Returns:
            (dict): keyed by false_{negative,positive} true_{negative,positive}
                with word counts as values
        '''
        def count_words(df, n_most_common):
            'Count of most common words in window of text/focal_word'
            # Compact way to flatten list of all windowings and do word count.
            words = (
                word
                for row in df.as_matrix()
                for word in get_window(row[0], row[1], 5)
            )
            return Counter(words).most_common(n_most_common)

        return {
            k: count_words(getattr(self, k), n_most_common)
            for k in [
                'false_negatives',
                'false_positives',
                'true_negatives',
                'true_positives'
            ]
        }
| 3,028 | 32.285714 | 79 | py |
metacorps-nn | metacorps-nn-master/n_layers_experiment.py | import sys
import pandas as pd
from modelrun import ModelRun
# Experiment: vary the NUMBER of equal-width (500-node) hidden layers
# (1, 2, 4, 6, 8), evaluate each configuration, and write a LaTeX results
# table.
verbose = True
n_nodes = 500  # width of every hidden layer

# Used to build data frame and latex table.
rows = []

w2v_model_loc = 'GoogleNews-vectors-negative300.bin'

# BUGFIX: learning_rate was only assigned in the CLI branch, so running
# without arguments crashed with NameError; sys.argv[2] was also read
# after checking for only ONE extra argument (IndexError with one arg).
if len(sys.argv) > 2:
    run_directory = sys.argv[1]
    learning_rate = float(sys.argv[2])
else:
    run_directory = 'test_hyperparam_test'
    learning_rate = 0.01  # ModelRun's default

# Keeping a ModelRun allows us to not have to re-load GoogleNews model.
mr = ModelRun(run_directory=run_directory,
              limit_word2vec=300000,
              w2v_model_loc=w2v_model_loc,
              learning_rate=learning_rate)

for n_hidden_layers in [1] + list(range(2, 9, 2)):
    # Parameter specifying number of 500-node hidden layers.
    mr.n_hidden = [n_nodes for _ in range(n_hidden_layers)]
    if verbose:
        print('Running with mr.n_hidden =', mr.n_hidden)

    ev = mr.run(n_epochs=120, verbose=verbose, early_stopping_limit=25)

    rows.append([
        'NN{}'.format(len(mr.n_hidden)),
        ev.sensitivity,
        ev.specificity,
        ev.precision,
        ev.auc
    ])

modelruns_table = pd.DataFrame(
    columns=['Method', 'Sens.', 'Spec.', 'Prec.', 'AUC'],
    data=rows
)
with open('PerformanceTable.tex', 'w') as f:
    modelruns_table.to_latex(f, float_format='%0.3f')
| 1,344 | 27.020833 | 71 | py |
CD-Flow | CD-Flow-main/main.py | import torch
from trainnet import trainNet
import pandas as pd
import argparse
def parse_config():
    """Build and parse the command-line configuration for training.

    Returns:
        argparse.Namespace: parsed arguments with the defaults below.
    """
    # (flag, type, default) — registered in this exact order.
    specs = [
        ('--seed', int, 100),
        ('--resume_path', str, None),
        ('--learning_rate', float, 1e-5),
        ('--scheduler_step', int, 5),
        ('--scheduler_gamma', float, 0.5),
        ('--batch_size_train', int, 4),
        ('--batch_size_test', int, 4),
        ('--n_epochs', int, 50),
        ('--training_datadir', str, ''),
        ('--colorspace', str, 'rgb'),
        ('--trainpath1', str, 'trainnet.py'),
        ('--trainpath2', str, 'main.py'),
        ('--trainpath3', str, 'model.py'),
        ('--trainpath4', str, 'DataLoader.py'),
        ('--work_path', str, 'work_dir'),
        ('--datapath', str, 'data'),
        ('--trainset', str, 'train.csv'),
        ('--valset', str, 'val.csv'),
        ('--testset', str, 'test.csv'),
        ('--test_aligned_path', str, None),
        ('--test_notaligned_path', str, None),
    ]
    parser = argparse.ArgumentParser()
    for flag, flag_type, default in specs:
        parser.add_argument(flag, type=flag_type, default=default)
    return parser.parse_args()
if __name__ == '__main__':
    config = parse_config()
    path = config.datapath

    # Per-image prediction tables.
    # NOTE(review): these three frames are created but never written to in
    # this script -- confirm whether trainNet was meant to fill them.
    modelprediction = pd.DataFrame(columns=['no'])
    modelprediction_aligned = pd.DataFrame(columns=['no'])
    modelprediction_notaligned = pd.DataFrame(columns=['no'])

    work_path = config.work_path
    trainpath = config.trainset
    valpath = config.valset
    testpath = config.testset

    # One row per fold; only fold i=0 (directory "1") is run below.
    performance = pd.DataFrame(columns=['stress', 'plcc', 'srcc', 'stress_aligned', 'plcc_aligned', 'srcc_aligned', 'stress_notaligned', 'plcc_notaligned', 'srcc_notaligned'])
    torch.cuda.empty_cache()

    i = 0
    # Point the config at fold i+1's data split and working directory.
    config.datapath = path+'/{}.csv'.format(i+1)
    config.work_path = work_path+'/{}'.format(i+1)
    config.trainset = path+'/{}/'.format(i+1)+trainpath
    config.valset = path+'/{}/'.format(i+1)+valpath
    config.testset = path+'/{}/'.format(i+1)+testpath
    config.test_aligned_path = path+'/{}/test_aligned.csv'.format(i+1)
    config.test_notaligned_path = path+'/{}/test_notaligned.csv'.format(i+1)

    # Train and collect metrics on the full test set (1), the pixel-wise
    # aligned subset (2), and the non-aligned subset (3).
    dist1, y_true1, stress1, cc_v1, srocc_v1, dist2, y_true2, stress2, cc_v2, srocc_v2,\
    dist3, y_true3, stress3, cc_v3, srocc_v3 = trainNet(config, i)
    performance.loc['{}'.format(i), 'stress'] = stress1
    performance.loc['{}'.format(i), 'plcc'] = cc_v1
    performance.loc['{}'.format(i), 'srcc'] = srocc_v1
    performance.loc['{}'.format(i), 'stress_aligned'] = stress2
    performance.loc['{}'.format(i), 'plcc_aligned'] = cc_v2
    performance.loc['{}'.format(i), 'srcc_aligned'] = srocc_v2
    performance.loc['{}'.format(i), 'stress_notaligned'] = stress3
    performance.loc['{}'.format(i), 'plcc_notaligned'] = cc_v3
    performance.loc['{}'.format(i), 'srcc_notaligned'] = srocc_v3

    performance.to_csv(config.work_path + '/modelperformance.csv', index=None)
| 3,377 | 48.676471 | 175 | py |
CD-Flow | CD-Flow-main/test.py | import time
from EMA import EMA
import torch
from torch.utils.data import DataLoader
from model import CDFlow
from DataLoader import CD_128
from coeff_func import *
import os
from loss import createLossAndOptimizer
from torch.autograd import Variable
import torchvision
import torch.autograd as autograd
from function import setup_seed, copy_codes
import argparse
def parse_config():
    """Build and parse the command-line configuration for evaluation.

    Returns:
        argparse.Namespace: parsed arguments with the defaults below.
    """
    # (flag, type, default) — registered in this exact order.
    specs = [
        ('--batch_size_test', int, 4),
        ('--work_path', str, 'work_dir'),
        ('--datapath', str, 'data'),
        ('--dataset', str, ''),
        ('--testset', str, 'test.csv'),
        ('--test_aligned_path', str, None),
        ('--test_notaligned_path', str, None),
    ]
    parser = argparse.ArgumentParser()
    for flag, flag_type, default in specs:
        parser.add_argument(flag, type=flag_type, default=default)
    return parser.parse_args()
def test(data_val_loader, net):
    """Run ``net`` over every batch in ``data_val_loader`` (no gradients)
    and score the predictions against the ground-truth values.

    Returns:
        (srocc, plcc, stress, predictions, ground_truth) where the last
        two are plain Python lists in loader order.
    """
    predictions = []
    targets = []
    for batch in data_val_loader:
        with torch.no_grad():
            x, y, gts = batch
            # Keep a CPU copy of the targets before moving to GPU.
            gts_np = gts.numpy()
            x, y, gts = \
                Variable(x).cuda(), \
                Variable(y).cuda(), \
                Variable(gts).cuda()
            # The model returns the score plus nine auxiliary outputs.
            score, _, _, _, _, _, _, _, _, _ = net(x, y)
            pred = (torch.squeeze(score)).cpu().detach().numpy().tolist()
            # squeeze() yields a list for batches, a scalar for size-1.
            if isinstance(pred, list):
                predictions.extend(pred)
                targets.extend(gts_np.tolist())
            else:
                predictions.append(np.array(pred))
                targets.append(gts_np)

    pred_arr = np.array(predictions)
    target_arr = np.array(targets).squeeze()
    stress = compute_stress(pred_arr, target_arr)
    _, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(pred_arr, target_arr)
    return srocc_v, cc_v, stress, predictions, targets
# --- Standalone evaluation script -------------------------------------
# Loads the best checkpoint from split "1" and reports PLCC/SRCC/STRESS
# on the full test set plus the pixel-aligned and non-aligned subsets.
config = parse_config()
path = config.datapath
work_path = config.work_path
testpath = config.testset

# Split index is fixed to 1 here (single-split evaluation).
workspace = work_path + '/{}'.format(1)
testset = path + '/{}/'.format(1) + testpath
test_aligned_path = path + '/{}/test_aligned.csv'.format(1)
test_notaligned_path = path + '/{}/test_notaligned.csv'.format(1)
datadir = config.dataset
batch_size_test = config.batch_size_test

# Each CSV row names an image pair plus its ground-truth score.
test_pairs = np.genfromtxt(open(testset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
test_aligned_pairs = np.genfromtxt(open(test_aligned_path), delimiter=',', dtype=str)
test_notaligned_pairs = np.genfromtxt(open(test_notaligned_path), delimiter=',', dtype=str)
data_test = CD_128(test_pairs[:], root_dir=datadir, test=True)
test_aligned = CD_128(test_aligned_pairs[:], root_dir=datadir, test=True)
test_notaligned = CD_128(test_notaligned_pairs[:], root_dir=datadir, test=True)
# shuffle=False keeps predictions in CSV row order.
data_test_loader = DataLoader(data_test, batch_size=batch_size_test, shuffle=False, pin_memory=True, num_workers=4)
data_test_aligned_loader = DataLoader(test_aligned, batch_size=batch_size_test, shuffle=False, pin_memory=True,
                                      num_workers=4)
data_test_notaligned_loader = DataLoader(test_notaligned, batch_size=batch_size_test, shuffle=False, pin_memory=True,
                                         num_workers=4)
print('#############################################################################')
print("Testing...")
print('#############################################################################')
device = torch.device("cuda")

# Restore the best validation checkpoint; the model was saved wrapped in
# DataParallel, so wrap before loading the state dict.
pt = os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt')
checkpoint = torch.load(pt)
net = CDFlow().cuda()
net = torch.nn.DataParallel(net).cuda()
net.load_state_dict(checkpoint['state_dict'])
net.eval()

srocc_v1, cc_v1, stress1, dist1, y_true1 = test(data_test_loader, net)
print('All: plcc{}; srcc{}; stress{}'.format(cc_v1, srocc_v1, stress1))
srocc_v2, cc_v2, stress2, dist2, y_true2 = test(data_test_aligned_loader, net)
print('Pixel-wise aligned: plcc{}; srcc{}; stress{}'.format(cc_v2, srocc_v2, stress2))
srocc_v3, cc_v3, stress3, dist3, y_true3 = test(data_test_notaligned_loader, net)
print('Non-Pixel-wise aligned: plcc{}; srcc{}; stress{}'.format(cc_v3, srocc_v3, stress3))
CD-Flow | CD-Flow-main/flow.py | import torch
from torch import nn
from torch.nn import functional as F
from math import log, pi, exp
import numpy as np
from scipy import linalg as la
logabs = lambda x: torch.log(torch.abs(x))
class ActNorm(nn.Module):
def __init__(self, in_channel, logdet=True):
super().__init__()
self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))
self.logdet = logdet
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
mean = (
flatten.mean(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
std = (
flatten.std(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
def forward(self, input):
_, _, height, width = input.shape
if self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
log_abs = logabs(self.scale)
logdet = height * width * torch.sum(log_abs)
if self.logdet:
return self.scale * (input + self.loc), logdet
else:
return self.scale * (input + self.loc)
def reverse(self, output):
return output / self.scale - self.loc
class InvConv2d(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = torch.randn(in_channel, in_channel)
q, _ = torch.qr(weight)
weight = q.unsqueeze(2).unsqueeze(3)
self.weight = nn.Parameter(weight)
def forward(self, input):
_, _, height, width = input.shape
out = F.conv2d(input, self.weight)
logdet = (
height * width * torch.slogdet(self.weight.squeeze().double())[1].float()
)
return out, logdet
def reverse(self, output):
return F.conv2d(
output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
)
class InvConv2dLU(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = np.random.randn(in_channel, in_channel)
q, _ = la.qr(weight)
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)
w_u = np.triu(w_u, 1)
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p)
w_l = torch.from_numpy(w_l)
w_s = torch.from_numpy(w_s)
w_u = torch.from_numpy(w_u)
self.register_buffer("w_p", w_p)
self.register_buffer("u_mask", torch.from_numpy(u_mask))
self.register_buffer("l_mask", torch.from_numpy(l_mask))
self.register_buffer("s_sign", torch.sign(w_s))
self.register_buffer("l_eye", torch.eye(l_mask.shape[0]))
self.w_l = nn.Parameter(w_l)
self.w_s = nn.Parameter(logabs(w_s))
self.w_u = nn.Parameter(w_u)
def forward(self, input):
_, _, height, width = input.shape
weight = self.calc_weight()
out = F.conv2d(input, weight)
logdet = height * width * torch.sum(self.w_s)
return out, logdet
def calc_weight(self):
weight = (
self.w_p
@ (self.w_l * self.l_mask + self.l_eye)
@ ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s)))
)
return weight.unsqueeze(2).unsqueeze(3)
def reverse(self, output):
weight = self.calc_weight()
return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
class ZeroConv2d(nn.Module):
def __init__(self, in_channel, out_channel, padding=1):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input):
out = F.pad(input, [1, 1, 1, 1], value=1)
out = self.conv(out)
out = out * torch.exp(self.scale * 3)
return out
class AffineCoupling(nn.Module):
def __init__(self, in_channel, filter_size=512, affine=True):
super().__init__()
self.affine = affine
self.net = nn.Sequential(
nn.Conv2d(in_channel // 2, filter_size, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(filter_size, filter_size, 1),
nn.ReLU(inplace=True),
ZeroConv2d(filter_size, in_channel if self.affine else in_channel // 2),
)
self.net[0].weight.data.normal_(0, 0.05)
self.net[0].bias.data.zero_()
self.net[2].weight.data.normal_(0, 0.05)
self.net[2].bias.data.zero_()
def forward(self, input):
in_a, in_b = input.chunk(2, 1)
if self.affine:
log_s, t = self.net(in_a).chunk(2, 1)
s = F.sigmoid(log_s + 2)
out_b = (in_b + t) * s
logdet = torch.sum(torch.log(s).view(input.shape[0], -1), 1)
else:
net_out = self.net(in_a)
out_b = in_b + net_out
logdet = None
return torch.cat([in_a, out_b], 1), logdet
def reverse(self, output):
out_a, out_b = output.chunk(2, 1)
if self.affine:
log_s, t = self.net(out_a).chunk(2, 1)
# s = torch.exp(log_s)
s = F.sigmoid(log_s + 2)
# in_a = (out_a - t) / s
in_b = out_b / s - t
else:
net_out = self.net(out_a)
in_b = out_b - net_out
return torch.cat([out_a, in_b], 1)
class Flow(nn.Module):
def __init__(self, in_channel, affine=True, conv_lu=True):
super().__init__()
self.actnorm = ActNorm(in_channel)
if conv_lu:
self.invconv = InvConv2dLU(in_channel)
else:
self.invconv = InvConv2d(in_channel)
self.coupling = AffineCoupling(in_channel, affine=affine)
def forward(self, input):
out, logdet = self.actnorm(input)
out, det1 = self.invconv(out)
out, det2 = self.coupling(out)
logdet = logdet + det1
if det2 is not None:
logdet = logdet + det2
return out, logdet
def reverse(self, output):
input = self.coupling.reverse(output)
input = self.invconv.reverse(input)
input = self.actnorm.reverse(input)
return input
def gaussian_log_p(x, mean, log_sd):
return -0.5 * log(2 * pi) - log_sd - 0.5 * (x - mean) ** 2 / torch.exp(2 * log_sd)
def gaussian_sample(eps, mean, log_sd):
return mean + torch.exp(log_sd) * eps
class Block(nn.Module):
def __init__(self, in_channel, n_flow, split=True, affine=True, conv_lu=True):
super().__init__()
squeeze_dim = in_channel * 4
self.flows = nn.ModuleList()
for i in range(n_flow):
self.flows.append(Flow(squeeze_dim, affine=affine, conv_lu=conv_lu))
self.split = split
if split:
self.prior = ZeroConv2d(in_channel * 2, in_channel * 4)
else:
self.prior = ZeroConv2d(in_channel * 4, in_channel * 8)
def forward(self, input):
b_size, n_channel, height, width = input.shape
squeezed = input.view(b_size, n_channel, height // 2, 2, width // 2, 2)
squeezed = squeezed.permute(0, 1, 3, 5, 2, 4)
out = squeezed.contiguous().view(b_size, n_channel * 4, height // 2, width // 2)
logdet = 0
for flow in self.flows:
out, det = flow(out)
logdet = logdet + det
if self.split:
out, z_new = out.chunk(2, 1)
mean, log_sd = self.prior(out).chunk(2, 1)
log_p = gaussian_log_p(z_new, mean, log_sd)
log_p = log_p.view(b_size, -1).sum(1)
else:
one = torch.ones_like(out)
mean, log_sd = self.prior(one).chunk(2, 1)
log_p = gaussian_log_p(out, mean, log_sd)
log_p = log_p.view(b_size, -1).sum(1)
z_new = out
#self.log_sd = log_sd
return out, logdet, log_p, z_new
def reverse(self, output, eps=None, reconstruct=False):
input = output
if reconstruct:
if self.split:
input = torch.cat([output, eps], 1) ## channel-wise concat
else:
input = eps
else:
if self.split:
mean, log_sd = self.prior(input).chunk(2, 1)
z = gaussian_sample(eps, mean, log_sd)
input = torch.cat([output, z], 1)
else:
one = torch.ones_like(input)
mean, log_sd = self.prior(one).chunk(2, 1)
z = gaussian_sample(eps, mean, log_sd)
input = z
for flow in self.flows[::-1]:
input = flow.reverse(input)
b_size, n_channel, height, width = input.shape
unsqueezed = input.view(b_size, n_channel // 4, 2, 2, height, width)
unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3)
unsqueezed = unsqueezed.contiguous().view(
b_size, n_channel // 4, height * 2, width * 2
)
return unsqueezed
class Glow(nn.Module):
def __init__(
self, in_channel, n_flow, n_block, affine=True, conv_lu=True
):
super().__init__()
self.blocks = nn.ModuleList()
n_channel = in_channel
for i in range(n_block - 1):
self.blocks.append(Block(n_channel, n_flow, affine=affine, conv_lu=conv_lu))
n_channel *= 2
self.blocks.append(Block(n_channel, n_flow, split=False, affine=affine))
def forward(self, input):
log_p_sum = 0
logdet = 0
out = input
z_outs = []
for i, block in enumerate(self.blocks):
out, det, log_p, z_new = block(out)
z_outs.append(z_new)
logdet = logdet + det
if log_p is not None:
log_p_sum = log_p_sum + log_p
return log_p_sum, logdet, z_outs
def reverse(self, z_list, reconstruct=True, cd_map=False):
for i, block in enumerate(self.blocks[::-1]):
if i == 0:
input = block.reverse(z_list[-1], z_list[-1], reconstruct=reconstruct)
else:
input = block.reverse(input, z_list[-(i + 1)], reconstruct=reconstruct)
return input
| 10,847 | 28.720548 | 88 | py |
CD-Flow | CD-Flow-main/DataLoader.py | import os
import torch
import random
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
import torchvision
class CD_128(Dataset):
def __init__(self, jnd_info, root_dir, test=False):
self.ref_name = jnd_info[:, 0]
self.test_name = jnd_info[:, 1]
self.root_dir = str(root_dir)
self.gt = jnd_info[:, 2]
self.test = test
if test == False:
self.trans_org = transforms.Compose([
transforms.Resize(1024),
transforms.RandomRotation(3),
transforms.RandomCrop(1000),
transforms.Resize(768),
transforms.ToTensor(),
])
else:
self.trans_org = transforms.Compose([
transforms.Resize(1024),
transforms.CenterCrop(1024),
transforms.ToTensor(),
])
def __len__(self):
return len(self.gt)
def __getitem__(self, idx):
gt = float(self.gt[idx])
full_address = os.path.join(self.root_dir, str(self.ref_name[idx]))
ref = Image.open(full_address).convert("RGB")
ref1 = self.trans_org(ref)
full_address_test = os.path.join(self.root_dir, str(self.test_name[idx]))
test = Image.open(full_address_test).convert("RGB")
test1 = self.trans_org(test)
return ref1, test1, gt
| 1,425 | 30 | 81 | py |
CD-Flow | CD-Flow-main/loss.py | import torch
import numpy as np
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
def createLossAndOptimizer(net, learning_rate, scheduler_step, scheduler_gamma):
loss = LossFunc()
# optimizer = optim.Adam([{'params': net.parameters(), 'lr':learning_rate}], lr = learning_rate, weight_decay=5e-4)
optimizer = optim.Adam([{'params': net.parameters(), 'lr': learning_rate}], lr=learning_rate, eps=1e-7)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma)
return loss, optimizer, scheduler
class LossFunc(torch.nn.Module):
def __init__(self):
super(LossFunc, self).__init__()
def mse_loss(self, score, label):
score = torch.squeeze(score)
return torch.mean((score - label) ** 2)
def forward(self, score, label):
mse = self.mse_loss(score, label)
return mse
| 909 | 34 | 119 | py |
CD-Flow | CD-Flow-main/model.py | import math
import time
import torch
import torch.nn as nn
from flow import *
import os
class CDFlow(nn.Module):
def __init__(self):
super(CDFlow, self).__init__()
self.glow = Glow(3, 8, 6, affine=True, conv_lu=True)
def coordinate_transform(self, x_hat, rev=False):
if not rev:
log_p, logdet, x_hat = self.glow(x_hat)
return log_p, logdet, x_hat
else:
x_hat = self.glow.reverse(x_hat)
return x_hat
def forward(self, x, y):
log_p_x, logdet_x, x_hat = self.coordinate_transform(x, rev=False)
log_p_y, logdet_y, y_hat = self.coordinate_transform(y, rev=False)
x_hat_1, y_hat_1 = x_hat[0].view(x_hat[0].shape[0], -1), y_hat[0].view(x_hat[0].shape[0], -1)
x_hat_2, y_hat_2 = x_hat[1].view(x_hat[1].shape[0], -1), y_hat[1].view(x_hat[1].shape[0], -1)
x_hat_3, y_hat_3 = x_hat[2].view(x_hat[2].shape[0], -1), y_hat[2].view(x_hat[2].shape[0], -1)
x_hat_4, y_hat_4 = x_hat[3].view(x_hat[3].shape[0], -1), y_hat[3].view(x_hat[3].shape[0], -1)
x_hat_5, y_hat_5 = x_hat[4].view(x_hat[4].shape[0], -1), y_hat[4].view(x_hat[4].shape[0], -1)
x_hat_6, y_hat_6 = x_hat[5].view(x_hat[5].shape[0], -1), y_hat[5].view(x_hat[5].shape[0], -1)
x_cat_65 = torch.cat((x_hat_6, x_hat_5), dim=1)
y_cat_65 = torch.cat((y_hat_6, y_hat_5), dim=1)
x_cat_654 = torch.cat((x_hat_6, x_hat_5, x_hat_4), dim=1)
y_cat_654 = torch.cat((y_hat_6, y_hat_5, y_hat_4), dim=1)
x_cat_6543 = torch.cat((x_hat_6, x_hat_5, x_hat_4, x_hat_3), dim=1)
y_cat_6543 = torch.cat((y_hat_6, y_hat_5, y_hat_4, y_hat_3), dim=1)
x_cat_65432 = torch.cat((x_hat_6, x_hat_5, x_hat_4, x_hat_3, x_hat_2), dim=1)
y_cat_65432 = torch.cat((y_hat_6, y_hat_5, y_hat_4, y_hat_3, y_hat_2), dim=1)
x_cat_654321 = torch.cat((x_hat_6, x_hat_5, x_hat_4, x_hat_3, x_hat_2, x_hat_1), dim=1)
y_cat_654321 = torch.cat((y_hat_6, y_hat_5, y_hat_4, y_hat_3, y_hat_2, y_hat_1), dim=1)
mse6 = (x_hat_6 - y_hat_6).view(x_hat_6.shape[0], -1)
mse6 = mse6.unsqueeze(1)
mse6 = torch.sqrt(1e-8 + torch.matmul(mse6, mse6.transpose(dim0=-2, dim1=-1))/mse6.shape[2])
mse6 = mse6.squeeze(2)
mse65 = (x_cat_65 - y_cat_65).view(x_cat_65.shape[0], -1)
mse65 = mse65.unsqueeze(1)
mse65 = torch.sqrt(1e-8 + torch.matmul(mse65, mse65.transpose(dim0=-2, dim1=-1))/mse65.shape[2])
mse65 = mse65.squeeze(2)
mse654 = (x_cat_654 - y_cat_654).view(x_cat_654.shape[0], -1)
mse654 = mse654.unsqueeze(1)
mse654 = torch.sqrt(1e-8 + torch.matmul(mse654, mse654.transpose(dim0=-2, dim1=-1))/mse654.shape[2])
mse654 = mse654.squeeze(2)
mse6543 = (x_cat_6543 - y_cat_6543).view(x_cat_6543.shape[0], -1)
mse6543 = mse6543.unsqueeze(1)
mse6543 = torch.sqrt(1e-8 + torch.matmul(mse6543, mse6543.transpose(dim0=-2, dim1=-1))/mse6543.shape[2])
mse6543 = mse6543.squeeze(2)
mse65432 = (x_cat_65432 - y_cat_65432).view(x_cat_65432.shape[0], -1)
mse65432 = mse65432.unsqueeze(1)
mse65432 = torch.sqrt(1e-8 + torch.matmul(mse65432, mse65432.transpose(dim0=-2, dim1=-1)) / mse65432.shape[2])
mse65432 = mse65432.squeeze(2)
mse654321 = (x_cat_654321 - y_cat_654321).view(x_cat_654321.shape[0], -1)
mse654321 = mse654321.unsqueeze(1)
mse654321 = torch.sqrt(1e-8 + torch.matmul(mse654321, mse654321.transpose(dim0=-2, dim1=-1)) / mse654321.shape[2])
mse654321 = mse654321.squeeze(2)
return mse654321, mse65432, mse6543, mse654, mse65, mse6, log_p_x, logdet_x, log_p_y, logdet_y
| 3,702 | 47.090909 | 122 | py |
CD-Flow | CD-Flow-main/EMA.py | class EMA():
def __init__(self, model, decay):
self.model = model
self.decay = decay
self.shadow = {}
self.backup = {}
def register(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def update(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = (1.0 - self.decay) * param.data + self.decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def apply_shadow(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.backup[name] = param.data
param.data = self.shadow[name]
def restore(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
| 1,138 | 32.5 | 94 | py |
CD-Flow | CD-Flow-main/function.py | import shutil
import random
import torch
import numpy as np
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def copy_codes(trainpath1,trainpath2,trainpath3,trainpath4, path1,path2,path3,path4):
shutil.copyfile(trainpath1, path1)
shutil.copyfile(trainpath2, path2)
shutil.copyfile(trainpath3, path3)
shutil.copyfile(trainpath4, path4)
| 484 | 25.944444 | 85 | py |
CD-Flow | CD-Flow-main/coeff_func.py | from cgi import print_form
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr, spearmanr, kendalltau
from scipy.optimize import fmin
from math import sqrt
from sklearn.metrics import mean_squared_error
def logistic(t, x):
return 0.5 - (1 / (1 + np.exp(t * x)))
def fitfun(t, x):
res = t[0] * (logistic(t[1], (x-t[2]))) + t[3] + t[4] * x
return res
def errfun(t, x, y):
return np.sum(np.power(y - fitfun(t, x),2))
def fitfun_4para(t, x):
res = t[0] * (logistic(t[1], (x-t[2]))) + t[3]
return res
def errfun_4para(t, x, y):
return np.sum(np.power(y - fitfun(t, x),2))
def RMSE(y_actual, y_predicted):
rmse = sqrt(mean_squared_error(y_actual, y_predicted))
return rmse
def coeff_fit(Obj,y):
temp = pearsonr(Obj, y)
t = np.zeros(5)
t[2] = np.mean(Obj)
t[3] = np.mean(y)
t[1] = 1/np.std(Obj)
t[0] = abs(np.max(y) - np.min(y))
t[4] = -1
signslope = 1
if temp[1]<=0:
t[0] *= -1
signslope *= -1
v = [t, Obj, y]
tt = fmin(errfun, t, args=(Obj, y))
fit = fitfun(tt, Obj)
cc = pearsonr(fit, y)[0]
# print("plcc")
srocc = spearmanr(fit, y).correlation
# print("srcc")
krocc = kendalltau(fit, y).correlation
# print("krocc")
rmse = RMSE( np.absolute(y), np.absolute(fit) )
# print("Rmse")
return fit, cc, srocc, krocc, rmse
def compute_stress(de,dv): #obj->delta E y->subjective->dV
fcv = np.sum(de*de)/np.sum(de*dv)
STRESS = 100*sqrt(np.sum((de-fcv*dv)*(de-fcv*dv))/(fcv*fcv*np.sum(dv*dv)))
return STRESS
| 1,583 | 23 | 78 | py |
CD-Flow | CD-Flow-main/trainnet.py | import time
from EMA import EMA
import torch
from torch.utils.data import DataLoader
from model import CDFlow
from DataLoader import CD_128
from coeff_func import *
import os
from loss import createLossAndOptimizer
from torch.autograd import Variable
import torch.autograd as autograd
from function import setup_seed, copy_codes
from math import log
def trainNet(config, times):
resume_path = config.resume_path
learning_rate = config.learning_rate
scheduler_step = config.scheduler_step
scheduler_gamma = config.scheduler_gamma
batch_size_train = config.batch_size_train
batch_size_test = config.batch_size_test
n_epochs = config.n_epochs
training_datadir = config.training_datadir
colorspace = config.colorspace
trainpath1 = config.trainpath1
trainpath2 = config.trainpath2
trainpath3 = config.trainpath3
trainpath4 = config.trainpath4
workspace = config.work_path
device = torch.device("cuda")
# set random seed
setup_seed(config.seed)
if not os.path.exists(workspace):
os.mkdir(workspace)
if not os.path.exists(os.path.join(workspace, 'codes')):
os.mkdir(os.path.join(workspace, 'codes'))
if not os.path.exists(os.path.join(workspace, 'checkpoint')):
os.mkdir(os.path.join(workspace, 'checkpoint'))
if not os.path.exists(os.path.join(workspace, 'checkpoint_best')):
os.mkdir(os.path.join(workspace, 'checkpoint_best'))
copy_codes(trainpath1=trainpath1, trainpath2=trainpath2, trainpath3=trainpath3, trainpath4=trainpath4,
path1=os.path.join(workspace, 'codes/trainNet.py'), path2=os.path.join(workspace, 'codes/main.py'),
path3=os.path.join(workspace, 'codes/net.py'), path4=os.path.join(workspace, 'codes/DataLoader.py'))
print("============ HYPERPARAMETERS ==========")
print("batch_size_train and test=", batch_size_train, batch_size_test)
print("epochs=", n_epochs)
print('learning rate=', learning_rate)
print('scheduler_step=', scheduler_step)
print('scheduler_gamma=', scheduler_gamma)
print('training dir=', training_datadir)
print('colorspace=', colorspace)
print(config.trainset)
print(config.valset)
print(config.testset)
print(config.test_aligned_path)
print(config.test_notaligned_path)
train_pairs = np.genfromtxt(open(config.trainset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
val_pairs = np.genfromtxt(open(config.valset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
test_pairs = np.genfromtxt(open(config.testset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
test_aligned_pairs = np.genfromtxt(open(config.test_aligned_path), delimiter=',', dtype=str)
test_notaligned_pairs = np.genfromtxt(open(config.test_notaligned_path), delimiter=',', dtype=str)
data_train = CD_128(train_pairs[:], root_dir=training_datadir, test=False)
data_val = CD_128(val_pairs[:], root_dir=training_datadir, test=True)
data_test = CD_128(test_pairs[:], root_dir=training_datadir, test=True)
test_aligned = CD_128(test_aligned_pairs[:], root_dir=training_datadir, test=True)
test_notaligned = CD_128(test_notaligned_pairs[:], root_dir=training_datadir, test=True)
net = CDFlow().to(device)
net = torch.nn.DataParallel(net)
net = net.to(device)
loss, optimizer, scheduler = createLossAndOptimizer(net, learning_rate, scheduler_step, scheduler_gamma)
data_train_loader = DataLoader(data_train, batch_size=batch_size_train, shuffle=True,
pin_memory=True, num_workers=4)
data_val_loader = DataLoader(data_val, batch_size=batch_size_test, shuffle=True, pin_memory=True,
num_workers=4)
data_test_loader = DataLoader(data_test, batch_size=batch_size_test, shuffle=False,
pin_memory=True, num_workers=4)
data_test_aligned_loader = DataLoader(test_aligned, batch_size=batch_size_test, shuffle=False,
pin_memory=True, num_workers=4)
data_test_notaligned_loader = DataLoader(test_notaligned, batch_size=batch_size_test,
shuffle=False, pin_memory=True, num_workers=4)
if resume_path is not None:
checkpoint = torch.load(resume_path)
start_epoch = checkpoint['epoch'] + 1
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('continue to train: shuffle{} epoch{} '.format(times + 1, start_epoch))
else:
start_epoch = 0
training_start_time = time.time()
rows, columns = train_pairs.shape
n_batches = rows // batch_size_train
valsrcc = 0
ema = EMA(net, 0.999)
ema.register()
autograd.set_detect_anomaly(True)
for epoch in range(start_epoch, n_epochs):
# initiate parameters for statistic recordings.
dist = []
y_true = []
running_loss = 0.0
total_train_loss = 0
start_time = time.time()
print_every = 20
train_counter = 0
net.train()
print("---------------------train mode-------epoch{}--------------------------".format(epoch))
for i, data in enumerate(data_train_loader, 0):
train_counter = train_counter + 1
x, y, gts = data
y_val = gts.numpy()
x, y, gts = \
Variable(x).to(device), \
Variable(y).to(device), \
Variable(gts).to(device)
optimizer.zero_grad()
score, score65432, score6543, score654, score65, score6, log_p_x, logdet_x, log_p_y, logdet_y = net(x, y)
logdet_x = logdet_x.mean()
logdet_y = logdet_y.mean()
loss_x, log_p_x, log_det_x = calc_loss(log_p_x, logdet_x, 768, 2.0 ** 5)
loss_y, log_p_y, log_det_y = calc_loss(log_p_y, logdet_y, 768, 2.0 ** 5)
score_loss = 10 * loss(score, gts) + loss(score65432, gts) + loss(score6543, gts) + loss(score654, gts) + loss(score65, gts) + loss(score6, gts)
loss_size = 10 * score_loss + loss_x + loss_y
loss_size.backward()
optimizer.step()
ema.update()
running_loss += loss_size.item()
total_train_loss += loss_size.item()
pred = (torch.squeeze(score)).cpu().detach().numpy().tolist()
if isinstance(pred, list):
dist.extend(pred)
y_true.extend(y_val.tolist())
else:
dist.append(np.array(pred))
y_true.append(y_val)
if (i + 1) % (print_every + 1) == 0:
print("Epoch {}, {:d}% \t train_loss: {:.6f} took: {:.2f}s".format(
epoch + 1, int(100 * (i + 1) / n_batches), running_loss / print_every, time.time() - start_time))
running_loss = 0.0
start_time = time.time()
torch.save(
{"state_dict": net.state_dict(), 'epoch': epoch, 'optimizer': optimizer.state_dict(), 'times': times}, \
os.path.join(workspace, 'checkpoint', 'ModelParams_checkpoint.pt'))
# Calculate correlation coefficients between the predicted values and ground truth values on training set.
dist = np.array(dist).squeeze()
y_true = np.array(y_true).squeeze()
_, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(dist, y_true)
print("Training set: PCC{:.4}, SROCC{:.4}, KROCC{:.4}, RMSE{:.4}".format(cc_v, srocc_v, krocc_v, rmse_v))
# validation
# EMA
ema.apply_shadow()
# EMA
net.eval()
print("----------------------------validation mode---------------------------------")
srocc_v, total_val_loss, val_counter, cc_v, krocc_v, rmse_v, stress, dist, y_true, score_val = test(
data_val_loader, net, loss)
# srocc_a, total_val_loss_a, val_counter_a, cc_a, krocc_a, rmse_a, stress_a, dist_a, y_true_a, score_a = test(
# data_test_aligned_loader, net, loss)
# srocc_na, total_val_loss_na, val_counter_na, cc_na, krocc_na, rmse_na, stress_na, dist_na, y_true_na, score_na = test(
# data_test_notaligned_loader, net, loss)
if srocc_v > valsrcc:
valsrcc = srocc_v
torch.save({"state_dict": net.state_dict()},
os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt'))
print('update best model...')
print("VALIDATION: PCC{:.4}, SROCC{:.4}, STRESS{:.4}, RMSE{:.4}".format(cc_v, srocc_v, stress, rmse_v))
print("loss = {:.6}".format(total_val_loss / val_counter))
# EMA
ema.restore()
# EMA
scheduler.step()
print('#############################################################################')
print("Training finished, took {:.2f}s".format(time.time() - training_start_time))
pt = os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt')
checkpoint = torch.load(pt)
net = CDFlow().to(device)
net = torch.nn.DataParallel(net).to(device)
net.load_state_dict(checkpoint['state_dict'])
net.eval()
srocc_v1, total_val_loss, val_counter, cc_v1, krocc_v, rmse_v, stress1, dist1, y_true1, score_val = test(
data_test_loader, net, loss)
print('best performance: plcc{} srcc{}'.format(cc_v1, srocc_v1))
srocc_v2, total_val_loss, val_counter, cc_v2, krocc_v, rmse_v, stress2, dist2, y_true2, score_val = test(
data_test_aligned_loader, net, loss)
print('best performance in Pixel-wise aligned: plcc{} srcc{}'.format(cc_v2, srocc_v2))
srocc_v3, total_val_loss, val_counter, cc_v3, krocc_v, rmse_v, stress3, dist3, y_true3, score_val = test(
data_test_notaligned_loader, net, loss)
print('best performance in non-Pixel-wise aligned: plcc{} srcc{}'.format(cc_v3, srocc_v3))
return dist1, y_true1, stress1, cc_v1, srocc_v1, dist2, y_true2, stress2, cc_v2, srocc_v2, dist3, y_true3, stress3, cc_v3, srocc_v3
def test(data_val_loader, net, loss):
total_val_loss = 0
val_counter = 0
score_val = 0
dist = []
y_true = []
device = torch.device("cuda")
for i, data in enumerate(data_val_loader, 0):
with torch.no_grad():
x, y, gts = data
y_val = gts.numpy()
x, y, gts = \
Variable(x).to(device), \
Variable(y).to(device), \
Variable(gts).to(device)
score, _, _, _, _, _, _, _, _, _ = net(x, y)
score_loss = loss(score, gts)
loss_size = score_loss
total_val_loss += loss_size.cpu().numpy()
score_val = score_val + score_loss.item()
val_counter += 1
pred = (torch.squeeze(score)).cpu().detach().numpy().tolist()
if isinstance(pred, list):
dist.extend(pred)
y_true.extend(y_val.tolist())
else:
dist.append(np.array(pred))
y_true.append(y_val)
# Calculate correlation coefficients between the predicted values and ground truth values on validation set.
dist_np = np.array(dist).squeeze()
y_true_np = np.array(y_true).squeeze()
stress = compute_stress(dist_np, y_true_np)
_, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(dist_np, y_true_np)
return srocc_v, total_val_loss, val_counter, cc_v, krocc_v, rmse_v, stress, dist, y_true, score_val
def calc_loss(log_p, logdet, image_size, n_bins):
n_pixel = image_size * image_size * 3
loss = -log(n_bins) * n_pixel
loss = loss + logdet + log_p
return (-loss / (log(2) * n_pixel)).mean(), (log_p / (log(2) * n_pixel)).mean(), (
logdet / (log(2) * n_pixel)).mean()
| 11,783 | 44.85214 | 156 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/setup.py | from distutils.core import setup
"""
install the packages
"""
setup(name='rl_utils',
version='0.0',
description='rl utils for the rl algorithms',
author='Tianhong Dai',
author_email='xxx@xxx.com',
url='no',
packages=['rl_utils'],
)
| 275 | 17.4 | 51 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/arguments.py | import argparse
def get_args():
parse = argparse.ArgumentParser()
parse.add_argument('--gamma', type=float, default=0.99, help='the discount factor of RL')
parse.add_argument('--seed', type=int, default=123, help='the random seeds')
parse.add_argument('--env-name', type=str, default='PongNoFrameskip-v4', help='the environment name')
parse.add_argument('--batch-size', type=int, default=32, help='the batch size of updating')
parse.add_argument('--lr', type=float, default=1e-4, help='learning rate of the algorithm')
parse.add_argument('--buffer-size', type=int, default=10000, help='the size of the buffer')
parse.add_argument('--cuda', action='store_true', help='if use the gpu')
parse.add_argument('--init-ratio', type=float, default=1, help='the initial exploration ratio')
parse.add_argument('--exploration_fraction', type=float, default=0.1, help='decide how many steps to do the exploration')
parse.add_argument('--final-ratio', type=float, default=0.01, help='the final exploration ratio')
parse.add_argument('--grad-norm-clipping', type=float, default=10, help='the gradient clipping')
parse.add_argument('--total-timesteps', type=int, default=int(1e7), help='the total timesteps to train network')
parse.add_argument('--learning-starts', type=int, default=10000, help='the frames start to learn')
parse.add_argument('--train-freq', type=int, default=4, help='the frequency to update the network')
parse.add_argument('--target-network-update-freq', type=int, default=1000, help='the frequency to update the target network')
parse.add_argument('--save-dir', type=str, default='saved_models/', help='the folder to save models')
parse.add_argument('--display-interval', type=int, default=10, help='the display interval')
parse.add_argument('--env-type', type=str, default='atari', help='the environment type')
parse.add_argument('--log-dir', type=str, default='logs', help='dir to save log information')
parse.add_argument('--use-double-net', action='store_true', help='use double dqn to train the agent')
parse.add_argument('--use-dueling', action='store_true', help='use dueling to train the agent')
args = parse.parse_args()
return args
| 2,247 | 73.933333 | 129 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/utils.py | import numpy as np
import random
# linear exploration schedule
class linear_schedule:
def __init__(self, total_timesteps, final_ratio, init_ratio=1.0):
self.total_timesteps = total_timesteps
self.final_ratio = final_ratio
self.init_ratio = init_ratio
def get_value(self, timestep):
frac = min(float(timestep) / self.total_timesteps, 1.0)
return self.init_ratio - frac * (self.init_ratio - self.final_ratio)
# select actions
def select_actions(action_value, explore_eps):
action_value = action_value.cpu().numpy().squeeze()
# select actions
action = np.argmax(action_value) if random.random() > explore_eps else np.random.randint(action_value.shape[0])
return action
# record the reward info of the dqn experiments
class reward_recorder:
def __init__(self, history_length=100):
self.history_length = history_length
# the empty buffer to store rewards
self.buffer = [0.0]
self._episode_length = 1
# add rewards
def add_rewards(self, reward):
self.buffer[-1] += reward
# start new episode
def start_new_episode(self):
if self.get_length >= self.history_length:
self.buffer.pop(0)
# append new one
self.buffer.append(0.0)
self._episode_length += 1
# get length of buffer
@property
def get_length(self):
return len(self.buffer)
@property
def mean(self):
return np.mean(self.buffer)
# get the length of total episodes
@property
def num_episodes(self):
return self._episode_length
| 1,621 | 28.490909 | 115 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.