repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
PLRDiff | PLRDiff-main/guided_diffusion/core.py | '''
copied from
https://github.com/sanghyun-son/bicubic_pytorch
A standalone PyTorch implementation for fast and efficient bicubic resampling.
The resulting values are the same to MATLAB function imresize('bicubic').
## Author: Sanghyun Son
## Email: sonsang35@gmail.com (primary), thstkdgus35@snu.ac.kr (secondary)
## Version: 1.2.0
## Last update: July 9th, 2020 (KST)
Dependency: torch
Example::
>>> import torch
>>> import core
>>> x = torch.arange(16).float().view(1, 1, 4, 4)
>>> y = core.imresize(x, sizes=(3, 3))
>>> print(y)
tensor([[[[ 0.7506, 2.1004, 3.4503],
[ 6.1505, 7.5000, 8.8499],
[11.5497, 12.8996, 14.2494]]]])
'''
import math
import typing
import torch
from torch.nn import functional as F
__all__ = ['imresize']
_I = typing.Optional[int]
_D = typing.Optional[torch.dtype]
def nearest_contribution(x: torch.Tensor) -> torch.Tensor:
    """Nearest-neighbor kernel: 1 inside the half-open window (-0.5, 0.5], else 0."""
    inside = (x > -0.5) & (x <= 0.5)
    return inside.to(dtype=x.dtype)
def linear_contribution(x: torch.Tensor) -> torch.Tensor:
    """Triangle (bilinear) kernel: max(0, 1 - |x|), supported on [-1, 1]."""
    mag = x.abs()
    support = mag.le(1).to(dtype=x.dtype)
    return (1 - mag) * support
def cubic_contribution(x: torch.Tensor, a: float=-0.5) -> torch.Tensor:
    """Keys' bicubic kernel (a = -0.5 matches MATLAB imresize 'bicubic').

    Piecewise cubic: one polynomial on |x| <= 1, another on 1 < |x| <= 2,
    zero outside the support [-2, 2].
    """
    ax = x.abs()
    ax2 = ax * ax
    ax3 = ax * ax2
    inner_mask = ax.le(1).to(dtype=x.dtype)
    outer_mask = (ax.gt(1) & ax.le(2)).to(dtype=x.dtype)
    inner = (a + 2) * ax3 - (a + 3) * ax2 + 1
    outer = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a)
    return inner * inner_mask + outer * outer_mask
def gaussian_contribution(x: torch.Tensor, sigma: float=2.0) -> torch.Tensor:
range_3sigma = (x.abs() <= 3 * sigma + 1)
# Normalization will be done after
cont = torch.exp(-x.pow(2) / (2 * sigma**2))
cont = cont * range_3sigma.to(dtype=x.dtype)
return cont
def discrete_kernel(
        kernel: str, scale: float, antialiasing: bool=True) -> torch.Tensor:
    '''
    Build a discrete, normalized 2-D resampling kernel.

    For downsampling with integer scale only (scale = 1 / integer factor).
    Returns a (kernel_size x kernel_size) tensor that sums to 1.
    '''
    downsampling_factor = int(1 / scale)
    if kernel == 'cubic':
        kernel_size_orig = 4
    else:
        # Bug fix: the original raised ValueError('Pass!'), which told the
        # caller nothing about what was wrong.
        raise ValueError('{} kernel is not supported!'.format(kernel))

    # Antialiasing widens the kernel support by the downsampling factor.
    if antialiasing:
        kernel_size = kernel_size_orig * downsampling_factor
    else:
        kernel_size = kernel_size_orig

    # Sample positions differ for even/odd factors so taps stay symmetric.
    if downsampling_factor % 2 == 0:
        a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size))
    else:
        kernel_size -= 1
        a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1))

    with torch.no_grad():
        r = torch.linspace(-a, a, steps=kernel_size)
        k = cubic_contribution(r).view(-1, 1)
        # Separable 2-D kernel: outer product of the 1-D taps, then normalize.
        k = torch.matmul(k, k.t())
        k /= k.sum()
    return k
def reflect_padding(
        x: torch.Tensor,
        dim: int,
        pad_pre: int,
        pad_post: int) -> torch.Tensor:
    '''
    Apply reflect padding to the given Tensor.
    Note that it is slightly different from the PyTorch functional.pad,
    where boundary elements are used only once.
    Instead, we follow the MATLAB implementation
    which uses boundary elements twice.
    For example,
    [a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
    while our implementation yields [a, a, b, c, d, d].
    '''
    b, c, h, w = x.size()
    if dim == 2 or dim == -2:
        # Pad along the height axis: copy x into the middle, then mirror
        # the first pad_pre rows upward and the last pad_post rows downward.
        padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
        padding_buffer[..., pad_pre:(h + pad_pre), :].copy_(x)
        for p in range(pad_pre):
            padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
        for p in range(pad_post):
            padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
    else:
        # Same procedure along the width axis.
        padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
        padding_buffer[..., pad_pre:(w + pad_pre)].copy_(x)
        for p in range(pad_pre):
            padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
        for p in range(pad_post):
            padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
    return padding_buffer
def padding(
        x: torch.Tensor,
        dim: int,
        pad_pre: int,
        pad_post: int,
        padding_type: typing.Optional[str]='reflect') -> torch.Tensor:
    """Dispatch padding by type: None is a no-op, 'reflect' mirrors edges."""
    if padding_type is None:
        return x
    if padding_type != 'reflect':
        raise ValueError('{} padding is not supported!'.format(padding_type))
    return reflect_padding(x, dim, pad_pre, pad_post)
def get_padding(
        base: torch.Tensor,
        kernel_size: int,
        x_size: int) -> typing.Tuple[int, int, torch.Tensor]:
    """Compute how much padding the kernel footprint needs on each side.

    `base` holds the leftmost source index of each output sample's kernel
    window; windows that start before 0 or end past x_size - 1 determine
    pad_pre / pad_post. `base` is shifted so it indexes the padded tensor.
    """
    base = base.long()
    r_min = base.min()
    r_max = base.max() + kernel_size - 1
    if r_min <= 0:
        pad_pre = -r_min
        # .item(): convert the 0-dim tensor to a Python int.
        pad_pre = pad_pre.item()
        base += pad_pre
    else:
        pad_pre = 0
    if r_max >= x_size:
        pad_post = r_max - x_size + 1
        pad_post = pad_post.item()
    else:
        pad_post = 0
    return pad_pre, pad_post, base
def get_weight(
        dist: torch.Tensor,
        kernel_size: int,
        kernel: str='cubic',
        sigma: float=2.0,
        antialiasing_factor: float=1) -> torch.Tensor:
    """Build a (kernel_size x len(dist)) weight matrix, normalized per column.

    Row k holds the kernel evaluated at (dist - k); the antialiasing factor
    expands the receptive field when downsampling and shrinks it otherwise.
    """
    taps = torch.arange(
        kernel_size, dtype=dist.dtype, device=dist.device).unsqueeze(1)
    offsets = (dist.unsqueeze(0) - taps) * antialiasing_factor
    if kernel == 'cubic':
        weight = cubic_contribution(offsets)
    elif kernel == 'gaussian':
        weight = gaussian_contribution(offsets, sigma=sigma)
    else:
        raise ValueError('{} kernel is not supported!'.format(kernel))
    # Each column must sum to 1 so the resampling preserves intensity.
    return weight / weight.sum(dim=0, keepdim=True)
def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor:
    """Unfold sliding 1-D kernel windows along one spatial axis.

    Returns a (B, kernel_size, h_out, w_out) tensor where channel k holds
    the k-th tap of each window.
    """
    if dim == 2 or dim == -2:
        # Windows slide along the height axis.
        window = (kernel_size, 1)
        out_h = x.size(-2) - kernel_size + 1
        out_w = x.size(-1)
    else:
        # Windows slide along the width axis.
        window = (1, kernel_size)
        out_h = x.size(-2)
        out_w = x.size(-1) - kernel_size + 1
    patches = F.unfold(x, window)
    return patches.view(patches.size(0), -1, out_h, out_w)
def reshape_input(
        x: torch.Tensor) -> typing.Tuple[torch.Tensor, typing.Optional[int],
                                         typing.Optional[int],
                                         typing.Optional[int],
                                         typing.Optional[int]]:
    """Normalize a 2-D/3-D/4-D tensor to shape (B*C, 1, H, W).

    Returns the reshaped tensor plus the original (b, c, h, w); b and/or c
    are None when the input had fewer than 4 dims, so reshape_output can
    restore the original rank.
    """
    if x.dim() == 4:
        b, c, h, w = x.size()
    elif x.dim() == 3:
        c, h, w = x.size()
        b = None
    elif x.dim() == 2:
        h, w = x.size()
        b = c = None
    else:
        raise ValueError('{}-dim Tensor is not supported!'.format(x.dim()))
    # Robustness fix: `view` raises on non-contiguous inputs (e.g. a
    # transposed tensor); `reshape` falls back to a copy only when needed.
    x = x.reshape(-1, 1, h, w)
    return x, b, c, h, w
def reshape_output(
        x: torch.Tensor, b: typing.Optional[int],
        c: typing.Optional[int]) -> torch.Tensor:
    """Undo reshape_input: restore the original 2/3/4-dim layout."""
    rh, rw = x.size(-2), x.size(-1)
    if b is not None:
        return x.view(b, c, rh, rw)   # 4-dim
    if c is not None:
        return x.view(c, rh, rw)      # 3-dim
    return x.view(rh, rw)             # 2-dim
def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor,
                                                typing.Optional[torch.dtype]]:
    """Cast non-float tensors to float32 for interpolation.

    Returns the (possibly converted) tensor and the original dtype to be
    restored by cast_output, or None when no conversion was needed.
    """
    # Bug fix: the original used `or`, which is always True (a dtype cannot
    # equal both float32 and float64), so float inputs were needlessly
    # converted and re-cast on output. The correct connective is `and`.
    if x.dtype != torch.float32 and x.dtype != torch.float64:
        dtype = x.dtype
        x = x.float()
    else:
        dtype = None
    return x, dtype
def cast_output(x: torch.Tensor, dtype: typing.Optional[torch.dtype]) -> torch.Tensor:
    """Cast the result back to the dtype recorded by cast_input (None = keep)."""
    if dtype is None:
        return x
    if not dtype.is_floating_point:
        x = x.round()
    # Clamp first to prevent over/underflow when narrowing to uint8.
    if dtype is torch.uint8:
        x = x.clamp(0, 255)
    return x.to(dtype=dtype)
def resize_1d(
        x: torch.Tensor,
        dim: int,
        size: typing.Optional[int],
        scale: typing.Optional[float],
        kernel: str='cubic',
        sigma: float=2.0,
        padding_type: str='reflect',
        antialiasing: bool=True) -> torch.Tensor:
    '''
    Resample x along a single spatial axis with the chosen kernel.

    Args:
        x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W).
        dim (int): axis to resize (-2/2 height, otherwise width).
        scale (float): output/input size ratio along that axis.
        size (int): target size along that axis.
    Return:
        The tensor resized along `dim`; gradients flow through x.
    '''
    # Identity case
    if scale == 1:
        return x
    # Default bicubic kernel with antialiasing (only when downsampling)
    if kernel == 'cubic':
        kernel_size = 4
    else:
        # Gaussian support is ~3 sigma each side.
        kernel_size = math.floor(6 * sigma)
    if antialiasing and (scale < 1):
        # Widen the kernel so the low-pass filter matches the downsampling.
        antialiasing_factor = scale
        kernel_size = math.ceil(kernel_size / antialiasing_factor)
    else:
        antialiasing_factor = 1
    # We allow margin to both sizes
    kernel_size += 2
    # Weights only depend on the shape of input and output,
    # so we do not calculate gradients here.
    with torch.no_grad():
        pos = torch.linspace(
            0, size - 1, steps=size, dtype=x.dtype, device=x.device,
        )
        # Map output sample centers into input coordinates (MATLAB convention).
        pos = (pos + 0.5) / scale - 0.5
        base = pos.floor() - (kernel_size // 2) + 1
        dist = pos - base
        weight = get_weight(
            dist,
            kernel_size,
            kernel=kernel,
            sigma=sigma,
            antialiasing_factor=antialiasing_factor,
        )
        pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
    # To backpropagate through x
    x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
    unfold = reshape_tensor(x_pad, dim, kernel_size)
    # Subsampling first: gather only the windows each output sample needs.
    if dim == 2 or dim == -2:
        sample = unfold[..., base, :]
        weight = weight.view(1, kernel_size, sample.size(2), 1)
    else:
        sample = unfold[..., base]
        weight = weight.view(1, kernel_size, 1, sample.size(3))
    # Apply the kernel: weighted sum over the kernel-tap dimension.
    x = sample * weight
    x = x.sum(dim=1, keepdim=True)
    return x
def downsampling_2d(
        x: torch.Tensor,
        k: torch.Tensor,
        scale: int,
        padding_type: str='reflect') -> torch.Tensor:
    """Downsample x by an integer factor with a user-supplied 2-D kernel.

    The kernel is expanded into a per-channel (diagonal) conv weight so each
    channel is filtered independently, then applied with stride = scale.
    """
    c = x.size(1)
    k_h = k.size(-2)
    k_w = k.size(-1)
    k = k.to(dtype=x.dtype, device=x.device)
    k = k.view(1, 1, k_h, k_w)
    k = k.repeat(c, c, 1, 1)
    # Multiply by the identity so only the diagonal (same-channel) taps remain.
    e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False)
    e = e.view(c, c, 1, 1)
    k = k * e
    pad_h = (k_h - scale) // 2
    pad_w = (k_w - scale) // 2
    x = padding(x, -2, pad_h, pad_h, padding_type=padding_type)
    x = padding(x, -1, pad_w, pad_w, padding_type=padding_type)
    y = F.conv2d(x, k, padding=0, stride=scale)
    return y
def imresize(
        input: torch.Tensor,
        scale: typing.Optional[float]=None,
        sizes: typing.Optional[typing.Tuple[int, int]]=None,
        kernel: typing.Union[str, torch.Tensor]='cubic',
        sigma: float=2,
        rotation_degree: float=0,
        padding_type: str='reflect',
        antialiasing: bool=True) -> torch.Tensor:
    '''
    Resize a 2-D/3-D/4-D tensor, matching MATLAB imresize('bicubic').

    Args:
        input (torch.Tensor): image of 2, 3, or 4 dims.
        scale (float): uniform scale factor (exclusive with sizes).
        sizes (tuple(int, int)): target (H, W) (exclusive with scale).
        kernel (str or torch.Tensor, default='cubic'): kernel name, or a
            discrete kernel tensor for integer-factor downsampling.
        sigma (float, default=2): Gaussian kernel width.
        rotation_degree (float, default=0): currently unused.
        padding_type (str, default='reflect'): boundary handling.
        antialiasing (bool, default=True): low-pass filter when downsampling.
    Return:
        torch.Tensor: resized tensor with the input's original rank and dtype.
    '''
    if scale is None and sizes is None:
        raise ValueError('One of scale or sizes must be specified!')
    if scale is not None and sizes is not None:
        raise ValueError('Please specify scale or sizes to avoid conflict!')
    x, b, c, h, w = reshape_input(input)
    if sizes is None:
        '''
        # Check if we can apply the convolution algorithm
        scale_inv = 1 / scale
        if isinstance(kernel, str) and scale_inv.is_integer():
            kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing)
        elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer():
            raise ValueError(
                'An integer downsampling factor '
                'should be used with a predefined kernel!'
            )
        '''
        # Determine output size
        sizes = (math.ceil(h * scale), math.ceil(w * scale))
        scales = (scale, scale)
    if scale is None:
        # Per-axis scales when only target sizes were given.
        scales = (sizes[0] / h, sizes[1] / w)
    x, dtype = cast_input(x)
    if isinstance(kernel, str):
        # Shared keyword arguments across dimensions
        kwargs = {
            'kernel': kernel,
            'sigma': sigma,
            'padding_type': padding_type,
            'antialiasing': antialiasing,
        }
        # Core resizing module: separable 1-D passes, height then width.
        x = resize_1d(x, -2, size=sizes[0], scale=scales[0], **kwargs)
        x = resize_1d(x, -1, size=sizes[1], scale=scales[1], **kwargs)
    elif isinstance(kernel, torch.Tensor):
        # Predefined discrete kernel: strided convolution path.
        x = downsampling_2d(x, kernel, scale=int(1 / scale))
    x = reshape_output(x, b, c)
    x = cast_output(x, dtype)
    return x
if __name__ == '__main__':
    # Just for debugging: smoke test of 2x bicubic downsampling on a ramp.
    torch.set_printoptions(precision=4, sci_mode=False, edgeitems=16, linewidth=200)
    a = torch.arange(64).float().view(1, 1, 8, 8)
    z = imresize(a, 0.5)
    print(z)
    #a = torch.arange(16).float().view(1, 1, 4, 4)
    '''
    a = torch.zeros(1, 1, 4, 4)
    a[..., 0, 0] = 100
    a[..., 1, 0] = 10
    a[..., 0, 1] = 1
    a[..., 0, -1] = 100
    a = torch.zeros(1, 1, 4, 4)
    a[..., -1, -1] = 100
    a[..., -2, -1] = 10
    a[..., -1, -2] = 1
    a[..., -1, 0] = 100
    '''
    #b = imresize(a, sizes=(3, 8), antialiasing=False)
    #c = imresize(a, sizes=(11, 13), antialiasing=True)
    #c = imresize(a, sizes=(4, 4), antialiasing=False, kernel='gaussian', sigma=1)
    #print(a)
    #print(b)
    #print(c)
    #r = discrete_kernel('cubic', 1 / 3)
    #print(r)
    '''
    a = torch.arange(225).float().view(1, 1, 15, 15)
    imresize(a, sizes=[5, 5])
    '''
| 13,613 | 27.904459 | 84 | py |
PLRDiff | PLRDiff-main/guided_diffusion/rsfac_gaussian_diffusion.py | """
This code started out as a PyTorch port of the following:
https://github.com/HJ-harry/MCG_diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
The conditions are changed and coefficient matrix estimation is added.
"""
import enum
import math
import numpy as np
import torch as th
from torch.autograd import grad
import torch.nn.functional as nF
from functools import partial
import torch.nn.parameter as Para
from .core import imresize
from os.path import join as join
def _warmup_beta(linear_start, linear_end, n_timestep, warmup_frac):
betas = linear_end * np.ones(n_timestep, dtype=np.float64)
warmup_time = int(n_timestep * warmup_frac)
betas[:warmup_time] = np.linspace(
linear_start, linear_end, warmup_time, dtype=np.float64)
return betas
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Return a length-`n_timestep` float64 numpy array of betas.

    Supported schedules: 'quad', 'linear', 'warmup10', 'warmup50', 'const',
    'jsd', 'cosine'. Raises NotImplementedError for anything else.
    """
    if schedule == 'quad':
        betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5,
                            n_timestep, dtype=np.float64) ** 2
    elif schedule == 'linear':
        betas = np.linspace(linear_start, linear_end,
                            n_timestep, dtype=np.float64)
    elif schedule == 'warmup10':
        betas = _warmup_beta(linear_start, linear_end,
                             n_timestep, 0.1)
    elif schedule == 'warmup50':
        betas = _warmup_beta(linear_start, linear_end,
                             n_timestep, 0.5)
    elif schedule == 'const':
        betas = linear_end * np.ones(n_timestep, dtype=np.float64)
    elif schedule == 'jsd':  # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1. / np.linspace(n_timestep,
                                 1, n_timestep, dtype=np.float64)
    elif schedule == "cosine":
        timesteps = (
            th.arange(n_timestep + 1, dtype=th.float64) /
            n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * math.pi / 2
        alphas = th.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Consistency fix: every other branch returns a numpy array, but
        # this branch used to return a torch tensor. Callers that do
        # np.array(betas) worked either way; now the type is uniform.
        betas = betas.clamp(max=0.999).numpy()
    else:
        raise NotImplementedError(schedule)
    return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.
    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.
    """
    if schedule_name == "linear":
        # Linear schedule from Ho et al, extended to work for any number of
        # diffusion steps: endpoints scale inversely with the step count.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps,
            dtype=np.float64,
        )
    if schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    n = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
    return np.array([
        min(1 - alpha_bar((i + 1) / n) / alpha_bar(i / n), max_beta)
        for i in range(n)
    ])
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.
    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """

    LEARNED = enum.auto()
    FIXED_SMALL = enum.auto()
    FIXED_LARGE = enum.auto()
    LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
    """Training loss variants for the diffusion model."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = (
        enum.auto()
    )  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        """True for the variational-bound losses (KL / RESCALED_KL)."""
        return self in (LossType.KL, LossType.RESCALED_KL)
class Param(th.nn.Module):
    """Wrap a tensor as a learnable nn.Parameter so an optimizer can update it."""

    def __init__(self, data):
        super(Param, self).__init__()
        self.E = Para.Parameter(data=data)

    def forward(self,):
        # Calling the module simply returns the current parameter value.
        return self.E
class GaussianDiffusion:
    """
    Utilities for training and sampling diffusion models.
    Ported directly from here, and then adapted over time to further experimentation.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
    :param betas: a 1-D numpy array of betas for each diffusion timestep,
                  starting at T and going to 1.
    :param model_mean_type: a ModelMeanType determining what the model outputs.
    :param model_var_type: a ModelVarType determining how variance is output.
    """

    def __init__(
        self,
        *,
        betas
    ):
        # Use float64 for accuracy.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert len(betas.shape) == 1, "betas must be 1-D"
        assert (betas > 0).all() and (betas <= 1).all()

        self.num_timesteps = int(betas.shape[0])

        alphas = 1.0 - betas
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        # Length T+1 (leading 1.0 for t = -1); indexed with t+1 in p_mean_variance.
        self.sqrt_alphas_cumprod_prev = np.sqrt(np.append(1., self.alphas_cumprod))
        assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
        self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
        self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        self.posterior_variance = (
            betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        # log calculation clipped because the posterior variance is 0 at the
        # beginning of the diffusion chain.
        self.posterior_log_variance_clipped = np.log(
            np.append(self.posterior_variance[1], self.posterior_variance[1:])
        )
        self.posterior_mean_coef1 = (
            betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        self.posterior_mean_coef2 = (
            (1.0 - self.alphas_cumprod_prev)
            * np.sqrt(alphas)
            / (1.0 - self.alphas_cumprod)
        )

    def q_mean_variance(self, x_start, t):
        """Mean, variance and log-variance of the marginal q(x_t | x_0)."""
        mean = (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        )
        variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = _extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance

    def q_sample(self, x_start, t, noise=None):
        """Draw x_t ~ q(x_t | x_0) using the reparameterization trick."""
        if noise is None:
            noise = th.randn_like(x_start)
        assert noise.shape == x_start.shape
        return (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
            * noise
        )

    def q_posterior_mean_variance(self, x_start, x_t, t):
        """Mean/variance of the true posterior q(x_{t-1} | x_t, x_0)."""
        assert x_start.shape == x_t.shape
        posterior_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        assert (
            posterior_mean.shape[0]
            == posterior_variance.shape[0]
            == posterior_log_variance_clipped.shape[0]
            == x_start.shape[0]
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(
        self, model, x, t, clip_denoised=True, denoised_fn=None
    ):
        """Predict p(x_{t-1} | x_t): the model outputs epsilon, from which
        x_0 is reconstructed (optionally clipped to [-1, 1]) and the
        posterior mean/log-variance are computed.

        NOTE(review): the SR3-style model is conditioned on a continuous
        noise level (sqrt_alphas_cumprod_prev[t+1]) rather than on t itself.
        """
        B = x.shape[0]
        noise_level = th.FloatTensor([self.sqrt_alphas_cumprod_prev[int(t.item())+1]]).repeat(B, 1).to(x.device)
        model_output = model(x, noise_level)

        def process_xstart(x):
            if denoised_fn is not None:
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp(-1, 1)
            return x

        pred_xstart = process_xstart(
            self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
        )
        model_mean, _, posterior_log_variance = self.q_posterior_mean_variance(
            x_start=pred_xstart, x_t=x, t=t
        )
        return {
            "mean": model_mean,
            "log_variance": posterior_log_variance,
            "pred_xstart": pred_xstart,
        }

    def _predict_xstart_from_eps(self, x_t, t, eps):
        """Recover x_0 from x_t and a predicted noise eps."""
        assert x_t.shape == eps.shape
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
        )

    def _predict_xstart_from_xprev(self, x_t, t, xprev):
        """Recover x_0 from x_t and a predicted x_{t-1}."""
        assert x_t.shape == xprev.shape
        return (  # (xprev - coef2*x_t) / coef1
            _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
            - _extract_into_tensor(
                self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
            )
            * x_t
        )

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        """Invert _predict_xstart_from_eps: recover eps from x_t and x_0."""
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - pred_xstart
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def p_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
    ):
        """Sample x_{t-1} from the model at timestep t."""
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
        )
        noise = th.randn_like(x)
        nonzero_param = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        sample = out["mean"] + nonzero_param * th.exp(0.5 * out["log_variance"]) * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}

    def p_sample_loop(
        self,
        model,
        shape,
        Rr,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        model_condition=None,
        param=None,
        save_root=None,
        progress=True
    ):
        """Run the full guided reverse process and return the final sample
        together with the estimated coefficient matrix E."""
        finalX = None
        finalE = None
        dstep = 1000
        for (sample, E) in self.p_sample_loop_progressive(
            model,
            shape,
            Rr,
            noise=noise,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_condition=model_condition,
            progress=progress,
            param=param,
            save_root=save_root
        ):
            finalX = sample
            finalE = E
        return finalX["sample"], finalE

    def p_sample_loop_progressive(
        self,
        model,
        shape,
        Rr,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_condition=None,
        device=None,
        progress=False,
        param=None,
        save_root=None  # use it for output intermediate predictions
    ):
        """Generator over guided reverse-diffusion steps; yields (out, E)
        at every timestep.

        The latent has Rr channels (the subspace rank); E lifts it to the
        Cc spectral bands. Guidance descends the gradient of two data terms:
        ||LRMS - down(blur(E x0))|| and ||PAN - x0 @ PH||.
        NOTE(review): cond_fn and save_root are currently unused here.
        """
        Bb, Cc, Hh, Ww = shape
        Rr = Rr
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            # Sample the Rr-channel subspace latent, not the full Cc bands.
            img = th.randn((Bb, Rr, Hh, Ww), device=device)
        indices = list(range(self.num_timesteps))[::-1]

        if progress:
            from tqdm.auto import tqdm
            indices = tqdm(indices)

        # Degradation operators: depthwise spatial blur + bicubic downsampling.
        blur = partial(nF.conv2d, weight=param['kernel'], padding=int((param['k_s'] - 1)/2), groups=Cc)
        down = partial(imresize, scale=1/param['scale'])

        LRMS = model_condition["LRMS"]
        PAN = model_condition["PAN"]

        ## estimate coefficient matrix E
        Eband = param['Band']
        bimg = th.index_select(LRMS, 1, Eband).reshape(Bb, Rr, -1) # base tensor from LRMS
        # estimate coefficient matrix E by solving least square problem:
        # E = (LRMS b^T)(b b^T + eps I)^{-1}, ridge-regularized for stability.
        t1 = th.matmul(bimg, bimg.transpose(1,2)) + 1e-4*th.eye(Rr).type(bimg.dtype).to(device)
        t2 = th.matmul(LRMS.reshape(Bb, Cc, -1), bimg.transpose(1,2))
        E = th.matmul(t2, th.inverse(t1))
        del bimg, t1, t2

        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            # re-instantiate requires_grad for backpropagation
            img = img.requires_grad_()
            out = self.p_sample(
                model,
                img,
                t,
                clip_denoised=clip_denoised,
                denoised_fn=denoised_fn
            )
            # Map predicted x0 from [-1, 1] to [0, 1], then lift the subspace
            # image to the full Cc bands via E.
            xhat_1 = (out["pred_xstart"] +1)/2
            xhat_1 = th.matmul(E, xhat_1.reshape(Bb, Rr, -1)).reshape(*shape)
            xhat_2 = blur(input=xhat_1)
            xhat_3 = down(input=xhat_2)
            norm1 = th.norm(LRMS - xhat_3)  # ||LRMS - DBEX||
            xhat_4 = th.matmul(xhat_1.permute(0,2,3,1), param["PH"]).permute(0,3,1,2) # HEX
            norm2 = th.norm(PAN - xhat_4) # ||P - HEX||
            # Guidance step: gradient of the weighted data terms w.r.t. img.
            norm_gradX = grad(outputs=norm1 + (param['eta2']/param['eta1'])*norm2, inputs=img)[0]
            out["sample"] = out["sample"] - param['eta1']*norm_gradX
            del norm_gradX
            yield out, E
            img = out["sample"]
            # Clears out small amount of gpu memory. If not used, memory usage will accumulate and OOM will occur.
            img.detach_()
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
| 16,226 | 34.900442 | 129 | py |
PLRDiff | PLRDiff-main/guided_diffusion/utils.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import os.path as osp
import json
import datetime
from collections import OrderedDict
def mkdirs(paths):
    """Create a directory (or each directory in an iterable), ignoring
    already-existing ones."""
    targets = [paths] if isinstance(paths, str) else paths
    for target in targets:
        os.makedirs(target, exist_ok=True)
def get_timestamp():
    """Return the current local time as 'yymmdd_HHMMSS' (e.g. '240131_120000').

    Bug fix: the module is imported as `import datetime`, so the original
    `datetime.now()` raised AttributeError — the constructor lives on the
    `datetime.datetime` class.
    """
    return datetime.datetime.now().strftime('%y%m%d_%H%M%S')
def parse(args):
    """Merge an argparse Namespace with a base JSON config file.

    Reads the JSON file named by args.baseconfig (lines may carry '//'
    comments, which are stripped before parsing), then overrides config
    keys with any CLI arguments that are not None. Side effect: exports
    CUDA_VISIBLE_DEVICES from args.gpu_ids.
    """
    args = vars(args)
    opt_path = args['baseconfig']
    # opt_path = args.config
    gpu_ids = args['gpu_ids']

    # remove comments starting with '//'
    json_str = ''
    with open(opt_path, 'r') as f:
        for line in f:
            line = line.split('//')[0] + '\n'
            json_str += line
    opt = json.loads(json_str, object_pairs_hook=OrderedDict)

    # CLI options take precedence over the JSON defaults, but only when given.
    for key in args:
        if args[key] is not None:
            opt[key] = args[key]

    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_ids
    print('export CUDA_VISIBLE_DEVICES=' + gpu_ids)
    return opt
class NoneDict(dict):
    """dict that returns None for missing keys instead of raising KeyError."""

    def __missing__(self, key):
        return None
def dict_to_nonedict(opt):
    """Recursively convert dicts (including those inside lists) to NoneDict,
    so missing keys read as None instead of raising."""
    if isinstance(opt, dict):
        converted = {key: dict_to_nonedict(value) for key, value in opt.items()}
        return NoneDict(**converted)
    if isinstance(opt, list):
        return [dict_to_nonedict(item) for item in opt]
    return opt
| 1,545 | 23.935484 | 101 | py |
PLRDiff | PLRDiff-main/guided_diffusion/create.py | from . import rsfac_gaussian_diffusion as gd
def create_model_and_diffusion_RS(opt):
    """Build the UNet generator and its Gaussian diffusion from a config dict."""
    model = define_G(opt['model'])
    diff_cfg = opt['diffusion']
    diffusion = create_gaussian_diffusion(
        beta_schedule=diff_cfg['beta_schedule'],
        beta_linear_start=diff_cfg['beta_linear_start'],
        beta_linear_end=diff_cfg['beta_linear_end'],
        steps=diff_cfg['diffusion_steps'],
    )
    return model, diffusion
def create_gaussian_diffusion(
    *,
    beta_schedule="linear",
    beta_linear_start=1e-6,
    beta_linear_end = 1e-2,
    steps=1000
):
    """Build a GaussianDiffusion from a named beta schedule and its endpoints."""
    schedule = gd.make_beta_schedule(
        schedule=beta_schedule,
        n_timestep=steps,
        linear_start=beta_linear_start,
        linear_end=beta_linear_end,
    )
    return gd.GaussianDiffusion(betas=schedule)
#Modified from:
#https://github.com/wgcban/ddpm-cd/blob/b0213c0049bab215e470326d97499ae69416a843/model/networks.py#L82
####################
# define network
####################
# Generator
def define_G(model_opt):
    """Instantiate the SR3 UNet generator from a model config dict.

    Falls back to 32 GroupNorm groups when 'norm_groups' is absent or None.
    """
    from .sr3_modules import unet
    if model_opt.get('norm_groups') is None:
        model_opt['norm_groups'] = 32
    return unet.UNet(
        in_channel=model_opt['in_channel'],
        out_channel=model_opt['out_channel'],
        norm_groups=model_opt['norm_groups'],
        inner_channel=model_opt['inner_channel'],
        channel_mults=model_opt['channel_multiplier'],
        attn_res=model_opt['attn_res'],
        res_blocks=model_opt['res_blocks'],
        dropout=model_opt['dropout'],
        image_size=256,
    )
| 1,601 | 26.62069 | 102 | py |
PLRDiff | PLRDiff-main/guided_diffusion/__init__.py | """
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
| 74 | 17.75 | 65 | py |
PLRDiff | PLRDiff-main/guided_diffusion/sr3_modules/unet.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from inspect import isfunction
def exists(x):
    # True when x is not None (helper for optional-argument handling).
    return x is not None
def default(val, d):
    """Return val when it is not None; otherwise the fallback d
    (called first if d is a function, to allow lazy defaults)."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
# PositionalEncoding Source: https://github.com/lmnt-com/wavegrad/blob/master/src/wavegrad/model.py
class PositionalEncoding(nn.Module):
    """Sinusoidal embedding of a continuous noise level.

    Produces [sin(level * f_k), cos(level * f_k)] for dim//2 geometrically
    spaced frequencies f_k = 1e4^(-k / (dim//2)).
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, noise_level):
        half = self.dim // 2
        freqs = torch.exp(
            -math.log(1e4)
            * torch.arange(half, dtype=noise_level.dtype, device=noise_level.device)
            / half
        )
        phases = noise_level.unsqueeze(1) * freqs.unsqueeze(0)
        return torch.cat([torch.sin(phases), torch.cos(phases)], dim=-1)
class FeatureWiseAffine(nn.Module):
    """Inject the noise-level embedding into a feature map.

    With use_affine_level the embedding predicts a per-channel (gamma, beta)
    pair applied as (1 + gamma) * x + beta; otherwise it is added as a
    per-channel bias.
    """

    def __init__(self, in_channels, out_channels, use_affine_level=False):
        super(FeatureWiseAffine, self).__init__()
        self.use_affine_level = use_affine_level
        # Note: the bool is promoted to int, doubling the output width
        # (gamma and beta) when the affine variant is enabled.
        self.noise_func = nn.Sequential(
            nn.Linear(in_channels, out_channels*(1+self.use_affine_level))
        )

    def forward(self, x, noise_embed):
        batch = x.shape[0]
        if self.use_affine_level:
            gamma, beta = self.noise_func(noise_embed).view(
                batch, -1, 1, 1).chunk(2, dim=1)
            x = (1 + gamma) * x + beta
        else:
            x = x + self.noise_func(noise_embed).view(batch, -1, 1, 1)
        return x
class Swish(nn.Module):
    """Swish/SiLU activation: x * sigmoid(x)."""

    def forward(self, x):
        return torch.sigmoid(x).mul(x)
class Upsample(nn.Module):
    """2x nearest-neighbor upsampling followed by a 3x3 convolution."""

    def __init__(self, dim):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

    def forward(self, x):
        return self.conv(self.up(x))
class Downsample(nn.Module):
    """2x spatial downsampling via a stride-2 3x3 convolution."""

    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, 2, 1)

    def forward(self, x):
        return self.conv(x)
# building block modules
class Block(nn.Module):
    """Basic conv block: GroupNorm -> Swish -> (Dropout) -> 3x3 Conv."""

    def __init__(self, dim, dim_out, groups=32, dropout=0):
        super().__init__()
        self.block = nn.Sequential(
            nn.GroupNorm(groups, dim),
            Swish(),
            nn.Dropout(dropout) if dropout != 0 else nn.Identity(),
            nn.Conv2d(dim, dim_out, 3, padding=1)
        )

    def forward(self, x):
        return self.block(x)
class ResnetBlock(nn.Module):
    """Residual block: two conv Blocks with the noise-level embedding
    injected between them; a 1x1 conv aligns channels on the skip path
    when dim != dim_out.
    """

    def __init__(self, dim, dim_out, noise_level_emb_dim=None, dropout=0, use_affine_level=False, norm_groups=32):
        super().__init__()
        self.noise_func = FeatureWiseAffine(
            noise_level_emb_dim, dim_out, use_affine_level)

        self.block1 = Block(dim, dim_out, groups=norm_groups)
        self.block2 = Block(dim_out, dim_out, groups=norm_groups, dropout=dropout)
        self.res_conv = nn.Conv2d(
            dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb):
        # Fix: removed the dead `b, c, h, w = x.shape` unpacking — `h` was
        # immediately shadowed by the block output and b/c/w were unused.
        h = self.block1(x)
        h = self.noise_func(h, time_emb)
        h = self.block2(h)
        return h + self.res_conv(x)
class SelfAttention(nn.Module):
    """Full spatial self-attention (every pixel attends to every pixel)
    with a residual connection around the attention output."""

    def __init__(self, in_channel, n_head=1, norm_groups=32):
        super().__init__()

        self.n_head = n_head

        self.norm = nn.GroupNorm(norm_groups, in_channel)
        # Single 1x1 conv produces Q, K and V stacked along channels.
        self.qkv = nn.Conv2d(in_channel, in_channel * 3, 1, bias=False)
        self.out = nn.Conv2d(in_channel, in_channel, 1)

    def forward(self, input):
        batch, channel, height, width = input.shape
        n_head = self.n_head
        head_dim = channel // n_head

        norm = self.norm(input)
        qkv = self.qkv(norm).view(batch, n_head, head_dim * 3, height, width)
        query, key, value = qkv.chunk(3, dim=2)  # bhdyx

        # Pairwise (h,w) x (y,x) logits, scaled by sqrt(channel).
        attn = torch.einsum(
            "bnchw, bncyx -> bnhwyx", query, key
        ).contiguous() / math.sqrt(channel)
        attn = attn.view(batch, n_head, height, width, -1)
        attn = torch.softmax(attn, -1)
        attn = attn.view(batch, n_head, height, width, height, width)

        out = torch.einsum("bnhwyx, bncyx -> bnchw", attn, value).contiguous()
        out = self.out(out.view(batch, channel, height, width))

        return out + input
class ResnetBlocWithAttn(nn.Module):
    """ResnetBlock optionally followed by spatial self-attention."""

    def __init__(self, dim, dim_out, *, noise_level_emb_dim=None, norm_groups=32, dropout=0, with_attn=False):
        super().__init__()
        self.with_attn = with_attn
        self.res_block = ResnetBlock(
            dim, dim_out, noise_level_emb_dim, norm_groups=norm_groups, dropout=dropout)
        if with_attn:
            self.attn = SelfAttention(dim_out, norm_groups=norm_groups)

    def forward(self, x, time_emb):
        x = self.res_block(x, time_emb)
        if(self.with_attn):
            x = self.attn(x)
        return x
def Reverse(lst):
    """Return a new list with the elements of lst in reverse order."""
    return list(reversed(lst))
class UNet(nn.Module):
    """SR3-style U-Net of ResnetBlocWithAttn stages with noise-level conditioning.

    Self-attention is enabled at every spatial resolution listed in
    ``attn_res``. When ``feat_need`` is passed to forward, the encoder and
    decoder feature lists are returned instead of the output image.
    """

    def __init__(
        self,
        in_channel=6,
        out_channel=3,
        inner_channel=32,
        norm_groups=32,
        channel_mults=(1, 2, 4, 8, 8),
        # BUG FIX: default was (8), which is just the int 8 — membership tests
        # like `now_res in attn_res` then raise TypeError. Must be a tuple.
        attn_res=(8,),
        res_blocks=3,
        dropout=0,
        with_noise_level_emb=True,
        image_size=128
    ):
        super().__init__()

        # MLP embedding the scalar noise level, when conditioning is enabled.
        if with_noise_level_emb:
            noise_level_channel = inner_channel
            self.noise_level_mlp = nn.Sequential(
                PositionalEncoding(inner_channel),
                nn.Linear(inner_channel, inner_channel * 4),
                Swish(),
                nn.Linear(inner_channel * 4, inner_channel)
            )
        else:
            noise_level_channel = None
            self.noise_level_mlp = None

        num_mults = len(channel_mults)
        pre_channel = inner_channel
        feat_channels = [pre_channel]  # channel counts of stored skip features
        now_res = image_size
        self.init_conv = nn.Conv2d(in_channels=in_channel, out_channels=inner_channel, kernel_size=3, padding=1)

        # Encoder: res_blocks blocks per scale, then a Downsample (except last).
        downs = []
        for ind in range(num_mults):
            is_last = (ind == num_mults - 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks):
                downs.append(ResnetBlocWithAttn(
                    pre_channel, channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups, dropout=dropout, with_attn=use_attn))
                feat_channels.append(channel_mult)
                pre_channel = channel_mult
            if not is_last:
                downs.append(Downsample(pre_channel))
                feat_channels.append(pre_channel)
                now_res = now_res//2
        self.downs = nn.ModuleList(downs)

        # Bottleneck: one block with attention, one without.
        self.mid = nn.ModuleList([
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=True),
            ResnetBlocWithAttn(pre_channel, pre_channel, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                               dropout=dropout, with_attn=False)
        ])

        # Decoder: res_blocks+1 blocks per scale (each consumes one skip
        # feature popped from feat_channels), then an Upsample (except last).
        ups = []
        for ind in reversed(range(num_mults)):
            is_last = (ind < 1)
            use_attn = (now_res in attn_res)
            channel_mult = inner_channel * channel_mults[ind]
            for _ in range(0, res_blocks+1):
                ups.append(ResnetBlocWithAttn(
                    pre_channel+feat_channels.pop(), channel_mult, noise_level_emb_dim=noise_level_channel, norm_groups=norm_groups,
                    dropout=dropout, with_attn=use_attn))
                pre_channel = channel_mult
            if not is_last:
                ups.append(Upsample(pre_channel))
                now_res = now_res*2
        self.ups = nn.ModuleList(ups)

        self.final_conv = Block(pre_channel, default(out_channel, in_channel), groups=norm_groups)

    def forward(self, x, time, feat_need=False):
        """Run the U-Net.

        Args:
            x: input tensor (batch, in_channel, H, W).
            time: noise-level input for the conditioning MLP.
            feat_need: when True, return (encoder_feats, decoder_feats)
                instead of the final image (note: the image is not returned
                in that mode — preserved from the original behavior).
        """
        t = self.noise_level_mlp(time) if exists(
            self.noise_level_mlp) else None

        # First downsampling layer
        x = self.init_conv(x)

        # Encoder pass; every intermediate is kept for the skip connections.
        feats = [x]
        for layer in self.downs:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)
            feats.append(x)
        if feat_need:
            fe = feats.copy()

        # Bottleneck.
        for layer in self.mid:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(x, t)
            else:
                x = layer(x)

        # Decoder pass, concatenating the matching encoder feature each step.
        if feat_need:
            fd = []
        for layer in self.ups:
            if isinstance(layer, ResnetBlocWithAttn):
                x = layer(torch.cat((x, feats.pop()), dim=1), t)
                if feat_need:
                    fd.append(x)
            else:
                x = layer(x)

        # Final Diffusion layer
        x = self.final_conv(x)

        if feat_need:
            # Decoder features reversed to mirror the encoder ordering.
            return fe, Reverse(fd)
        else:
            return x
| 9,347 | 31.8 | 150 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/diffusion.py | import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, UNet2DModel
from matplotlib import pyplot as plt
from diffusers import DDIMScheduler, DDPMPipeline
from data.dataset import data_loader
import wandb
import tqdm
# Experiment tracking. NOTE(review): entity "mbzuai-" looks truncated — confirm.
wandb.init(project="ml-708", entity="mbzuai-")
# Train on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# TB chest X-ray dataset; data_loader returns (dict of split loaders, datasets).
root_dir = "data/TB_data"
loader_, dataset = data_loader(root_dir=root_dir, batch_size=10)
train_dataloader = loader_['train']
def corrupt(x, amount):
    """Blend ``x`` with uniform noise: amount=0 keeps x, amount=1 is pure noise."""
    noise = torch.rand_like(x)
    # Reshape so the per-sample amount broadcasts over (B, C, H, W).
    mix = amount.view(-1, 1, 1, 1)
    return (1 - mix) * x + mix * noise
#@markdown Trying UNet2DModel instead of BasicUNet:
# Dataloader (you can mess with batch size)
#batch_size = 70
#train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# How many runs through the data should we do?
n_epochs = 200
# Create the network
net = UNet2DModel(
    sample_size=224, # the target image resolution
    in_channels=3, # the number of input channels, 3 for RGB images
    out_channels=3, # the number of output channels
    layers_per_block=2, # how many ResNet layers to use per UNet block
    act_fn="silu",
    add_attention=True,
    center_input_sample=False,
    downsample_padding=0,
    flip_sin_to_cos=False,
    freq_shift=1,
    mid_block_scale_factor=1,
    norm_eps=1e-06,
    norm_num_groups=32,
    time_embedding_type="positional",
    block_out_channels=(128,
        128,
        256,
        256,
        512,
        512), # Roughly matching our basic unet example
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",
        "DownBlock2D"
    ),
    up_block_types=(
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D" # a regular ResNet upsampling block
    ),
)
#<<<
net.to(device)
# Our loss function
# NOTE(review): loss_fn and opt below appear unused — the training loop uses
# F.mse_loss and the AdamW `optimizer`; confirm before deleting.
loss_fn = nn.MSELoss()
# The optimizer
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
lr=1e-3
# Keeping a record of the losses for later viewing
losses = []
# DDIM noise schedule + pipeline wrapping the freshly created UNet.
scheduler = DDIMScheduler(beta_end=0.02,beta_schedule="linear",beta_start=0.0001, clip_sample=True, num_train_timesteps=1000, prediction_type="epsilon")
image_pipe = DDPMPipeline(net,scheduler=scheduler)
image_pipe.to(device);
grad_accumulation_steps = 2 # @param
optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr)
# The training loop: standard DDPM epsilon-prediction objective with
# gradient accumulation; a checkpoint is saved after every epoch.
for epoch in range(n_epochs):
    # BUG FIX: the file does `import tqdm`, so the module object itself is not
    # callable — `tqdm(...)` raised TypeError. Use tqdm.tqdm for the bar.
    for step, batch in tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
        images, labels = batch
        clean_images = images.to(device)
        # Sample noise to add to the images
        noise = torch.randn(clean_images.shape).to(clean_images.device)
        bs = clean_images.shape[0]
        # Sample a random timestep for each image
        timesteps = torch.randint(
            0,
            image_pipe.scheduler.num_train_timesteps,
            (bs,),
            device=clean_images.device,
        ).long()
        # Forward diffusion: add noise according to the magnitude at each timestep.
        noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)
        # Get the model prediction for the noise
        noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]
        # Predicting noise (eps), not (noisy_ims - clean_ims) or clean_ims.
        loss = F.mse_loss(noise_pred, noise)
        # Store for later plotting
        losses.append(loss.item())
        # BUG FIX: was loss.backward(loss) — passing the scalar loss as the
        # `gradient` argument scales every gradient by the loss value.
        loss.backward()
        # Gradient accumulation:
        if (step + 1) % grad_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
    print(f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}")
    image_pipe.save_pretrained(f"saved_model/my-finetuned-model_{epoch}")
| 4,562 | 32.551471 | 152 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/test.py | import argparse
import torch
from tqdm import tqdm
import os
from models import mlp
from data.dataset import data_loader
from data.dataset import data_loader_attacks
# Root folder of pre-generated adversarial test images (epsilon = 0.03).
root_dir = "./data/attack-data/0.03"
def test_vit(model, dataloader_test):
    """Evaluate a ViT classifier on a test loader.

    Args:
        model: classifier to evaluate (expected to live on CUDA).
        dataloader_test: DataLoader yielding (images, labels) batches.

    Returns:
        Average per-batch top-1 accuracy, rounded to two decimals.
    """
    correct_frac = 0.0
    for batch_images, batch_labels in tqdm(dataloader_test):
        batch_images = batch_images.cuda()
        batch_labels = batch_labels.cuda()
        model.eval()
        with torch.no_grad():
            logits = model(batch_images)
        preds = torch.argmax(logits, dim=-1)
        correct_frac += (preds == batch_labels).float().sum().item() / len(batch_labels)
    avg_acc = correct_frac / len(dataloader_test)
    print(f'Testing accuracy = {avg_acc:.4f}')
    return round(avg_acc, 2)
def test_mlps(mlps_list, dataloader_test, mlp_root_dir):
    """Evaluate each intermediate MLP head on the test loader.

    Relies on the module-level ``model`` (the ViT backbone) for the patch
    embedding and transformer blocks.

    Args:
        mlps_list: saved MLP file names, sorted in block order.
        dataloader_test: DataLoader yielding (images, labels) batches.
        mlp_root_dir: directory containing the saved MLP files.

    Returns:
        List of average accuracies, one per MLP head. (The original ended in
        ``pass`` and returned None even though callers assign the result;
        returning a value is backward-compatible.)
    """
    accuracies = []
    # Loop variable renamed: it was `mlp`, shadowing the `models.mlp` module
    # imported at the top of the file.
    for depth in range(1, len(mlps_list) + 1):
        acc_avg = 0.0
        head = torch.load(os.path.join(mlp_root_dir, mlps_list[depth - 1])).cuda()
        head.eval()
        print(f'MLP of index {depth-1} has been loaded')
        for images, labels in tqdm(dataloader_test):
            images = images.cuda()
            labels = labels.cuda()
            # Run the backbone and head under no_grad — the original built an
            # autograd graph through the transformer blocks for no reason.
            with torch.no_grad():
                x = model.patch_embed(images)
                x = model.pos_drop(x)
                for block in range(depth):
                    x = model.blocks[block](x)
                output = head(x)
            predictions = torch.argmax(output, dim=-1)
            acc_avg += torch.sum(predictions == labels).item() / len(labels)
        avg = acc_avg / len(dataloader_test)
        print(f'Accuracy of block {depth-1} = {avg:.3f}')
        accuracies.append(avg)
    return accuracies
# CLI: choose whether to evaluate the ViT itself or the intermediate MLP heads.
parser = argparse.ArgumentParser(description='Testing ViT or MLPs')
parser.add_argument('--model_name', type=str , choices=['ViT','MLPs'],
                    help='Choose between ViT or MLPs')
parser.add_argument('--vit_path', type=str ,
                    help='pass the path of downloaded ViT')
parser.add_argument('--mlp_path', type=str ,
                    help='pass the path for the downloaded MLPs folder')
args = parser.parse_args()
# Adversarial test images for the FGSM attack at the root_dir epsilon.
loader_, dataset_ = data_loader_attacks(root_dir=root_dir, attack_name='FGSM')
# ViT backbone; test_mlps reads this module-level `model` as a global.
model = torch.load(args.vit_path).cuda()
model.eval()
if args.model_name == 'ViT':
    acc = test_vit(model=model, dataloader_test=loader_)
else:
    mlps_list = sorted(os.listdir(args.mlp_path))
acc = test_mlps(mlps_list= mlps_list, dataloader_test=loader_, mlp_root_dir=args.mlp_path) | 2,645 | 31.666667 | 94 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/aabb.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import os
from data.dataset import data_loader
# Prefer Apple MPS, then CUDA, then CPU.
device = (
    "mps"
    if torch.backends.mps.is_available()
    else "cuda"
    if torch.cuda.is_available()
    else "cpu"
)
def test():
    """Sample a batch of 8 images from the finetuned DDPM using DDIM steps.

    Loads the epoch-135 checkpoint and its scheduler, then iteratively
    denoises pure Gaussian noise for 600 inference steps.

    Returns:
        Tensor of shape (8, 3, 256, 256) on ``device``.
    """
    image_pipe = DDPMPipeline.from_pretrained("saved_model/my-finetuned-model_135")
    image_pipe.to(device)
    scheduler = DDIMScheduler.from_pretrained("saved_model/my-finetuned-model_135/scheduler")
    scheduler.set_timesteps(num_inference_steps=600)

    samples = torch.randn(8, 3, 256, 256).to(device)  # start from pure noise
    for _, t in tqdm(enumerate(scheduler.timesteps)):
        model_input = scheduler.scale_model_input(samples, t)
        with torch.no_grad():
            noise_pred = image_pipe.unet(model_input, t)["sample"]
        samples = scheduler.step(noise_pred, t, samples).prev_sample
    return samples
# Sample a batch of images from the finetuned pipeline.
x = test()
# Arrange the 8 samples into a 2x4 grid.
grid = torchvision.utils.make_grid(x, nrow=4)
# Map from [-1, 1] to [0, 1] for display.
plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);
| 1,250 | 29.512195 | 93 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/attack.py |
import foolbox as fb
import torch
import torch.nn as nn
from autoattack import AutoAttack
class Attack:
    """Generate adversarial images.

    Construct with an epsilon (float), an attack_type in
    {'FGSM', 'CW', 'BIM', 'L2PGD', 'PGD', 'LinfBIM', 'AutoAttack'} and the
    model to fool. ``generate_attack`` returns (adv_images, success) where
    the success tensor shows whether each example fooled the model
    (``None`` for AutoAttack, which does not report it).

    Refactor note: the per-attack wrappers previously repeated the same
    foolbox call; they now share the private ``_run`` driver.
    """

    def __init__(self, epsilon, attack_type, model):
        self.epsilon = epsilon
        self.attack_type = attack_type
        self.model_fool = fb.models.PyTorchModel(model, bounds=(0, 1))
        # AutoAttack adversary is always constructed; only used by AutoAttack().
        self.adversary = AutoAttack(model, norm='Linf', eps=self.epsilon, version='standard')

    def _run(self, attack_func, samples, labels):
        """Run a foolbox attack at self.epsilon; return (adv_images, success)."""
        _, adv_images, success = attack_func(self.model_fool,
                                             samples,
                                             labels,
                                             epsilons=self.epsilon)
        return adv_images, success

    def FGSM(self, samples, labels):
        """FGSM attack on (samples, labels)."""
        return self._run(fb.attacks.FGSM(), samples, labels)

    def L2PGD(self, samples, labels):
        """L2 PGD attack on (samples, labels)."""
        return self._run(fb.attacks.L2PGD(), samples, labels)

    def CW(self, samples, labels):
        """Carlini & Wagner L2 attack on (samples, labels)."""
        # (binary-search steps, iterations, step size, confidence) — kept
        # exactly as originally tuned.
        adv_images, success = self._run(
            fb.attacks.L2CarliniWagnerAttack(6, 1000, 0.01, 0), samples, labels)
        print(f'Sum = {sum(success)}')
        return adv_images, success

    def BIM(self, samples, labels):
        """L2 BIM attack on (samples, labels)."""
        return self._run(fb.attacks.L2BasicIterativeAttack(), samples, labels)

    def PGD(self, samples, labels):
        """Linf PGD attack on (samples, labels)."""
        return self._run(fb.attacks.PGD(), samples, labels)

    def LinfBIM(self, samples, labels):
        """Linf BIM attack on (samples, labels)."""
        return self._run(fb.attacks.LinfBasicIterativeAttack(), samples, labels)

    def AutoAttack(self, samples, labels):
        """AutoAttack (standard Linf suite); success is not reported (None)."""
        x_adv = self.adversary.run_standard_evaluation(samples, labels, bs=15)
        success = None
        return x_adv, success

    def generate_attack(self, samples, labels):
        """Dispatch to the attack selected at construction time.

        Returns:
            (adv_images, success) for a supported attack_type; otherwise
            prints a message and returns None (as the original did).
        """
        dispatch = {
            'FGSM': self.FGSM,
            'CW': self.CW,
            'L2PGD': self.L2PGD,
            'BIM': self.BIM,
            'PGD': self.PGD,
            'LinfBIM': self.LinfBIM,
            'AutoAttack': self.AutoAttack,
        }
        runner = dispatch.get(self.attack_type)
        if runner is None:
            print(f'Attacks of type {self.attack_type} is not supported')
            return None
        return runner(samples, labels)
| 7,000 | 34.358586 | 99 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/mlp.py | import torch.nn as nn
import torch
from utils import get_classifiers_list
class Classifier(nn.Module):
    """Four-layer MLP head over flattened ViT patch tokens.

    Args:
        num_classes: number of output classes.
        in_features: flattened input dimension (196 patches x 768 channels).

    The forward pass returns raw logits of shape (batch, num_classes).
    """

    def __init__(self, num_classes=2, in_features=768*196):
        super().__init__()
        # Layers are created in the same order as before so seeded
        # initialization stays reproducible.
        self.linear1 = nn.Linear(in_features=in_features, out_features=4096)
        self.linear2 = nn.Linear(in_features=4096, out_features=2048)
        self.linear3 = nn.Linear(in_features=2048, out_features=128)
        self.linear4 = nn.Linear(in_features=128, out_features=num_classes)
        # NOTE(review): dropout is constructed but never applied in forward —
        # confirm whether that is intentional before relying on it.
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        flat = x.reshape(-1, 196*768)
        hidden = nn.functional.relu(self.linear1(flat))
        hidden = nn.functional.relu(self.linear2(hidden))
        hidden = nn.functional.relu(self.linear3(hidden))
        return self.linear4(hidden)
class Big_model(nn.Module):
    """ViT backbone plus intermediate MLP heads, ensembled by summing
    temperature-sharpened softmax outputs.

    Args:
        MLP_path: directory of saved intermediate MLP heads.
        num_classifiers: how many heads to load.
        vit_path: path of the saved ViT model.
    """

    def __init__(self, MLP_path='models/MLP_new_chest', num_classifiers=3, vit_path='models/vit_base_patch16_224_in21k_test-accuracy_0.96_chest.pth'):
        super().__init__()
        self.MLP_path = MLP_path
        self.vit_path = vit_path
        self.num_classifiers = num_classifiers
        self.mlp_list = get_classifiers_list(self.MLP_path, num_classifiers=self.num_classifiers)
        self.model = torch.load(self.vit_path)

    def forward(self, x):
        # Vote of the full ViT head, sharpened by a x25 logit scale.
        votes = [torch.softmax(self.model(x) * 25, dim=-1)]
        # Patch-embed once, then advance block-by-block, collecting one
        # sharpened vote per intermediate MLP head.
        tokens = self.model.pos_drop(self.model.patch_embed(x))
        for idx, head in enumerate(self.mlp_list):
            tokens = self.model.blocks[idx](tokens)
            votes.append(torch.softmax(head(tokens) * 25, dim=-1))
        # Summed probability vectors act as the ensemble score.
        return torch.stack(votes, dim=1).sum(dim=1)
| 2,262 | 33.815385 | 152 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/utils.py | import os
import torch
from attack import Attack
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import save_image
from autoattack import AutoAttack
def generate_save_attacks(attack_names, model, samples, classes, attack_image_dir, epsilon=0.03, batch_size=30):
    """Generate adversarial images from test samples and save them to disk.

    Creates ``<attack_image_dir>/Test_attacks_<attack>/<class>/`` folders and
    writes one PNG per adversarial image, filed under its true class.

    Args:
        attack_names: list of attack names ('AUTOPGD' uses AutoAttack's
            apgd-ce; everything else goes through the Attack wrapper).
        model: model to attack.
        samples: test DataLoader yielding (images, labels).
        classes: list of class folder names, indexed by the binary label.
        attack_image_dir: root directory for the saved attack images.
        epsilon: perturbation budget.
        batch_size: unused; kept for backward compatibility.
    """
    # Create the output folder tree.
    for attack in attack_names:
        attack_folder = f'Test_attacks_{attack}'
        print(attack_image_dir)
        if not os.path.exists(os.path.join(attack_image_dir, attack_folder)):
            os.makedirs(os.path.join(attack_image_dir, attack_folder))
        inter_ = os.path.join(attack_image_dir, attack_folder) + '/'
        for classe in classes:
            if not os.path.exists(os.path.join(inter_, classe)):
                os.makedirs(os.path.join(inter_, classe))

    for attack_name in attack_names:
        if attack_name != 'AUTOPGD':
            model.eval()
            attack = Attack(epsilon=epsilon, attack_type=attack_name, model=model)
            batch_num = 0
            for im, lab in samples:
                im = im.cuda()
                lab = lab.cuda()
                adv_img, _ = attack.generate_attack(im, labels=lab)
                print('Batch')
                _save_adv_batch(adv_img, lab, attack_name, classes, attack_image_dir, batch_num)
                batch_num += 1
        else:
            # AutoAttack's APGD-CE variant is driven directly.
            adversary = AutoAttack(model=model, eps=epsilon, version='custom', norm='Linf', attacks_to_run=['apgd-ce'])
            batch_num = 0
            for im, lab in samples:
                im = im.cuda()
                lab = lab.cuda()
                adv_img = adversary.run_standard_evaluation(im, lab, bs=lab.shape[0])
                _save_adv_batch(adv_img, lab, attack_name, classes, attack_image_dir, batch_num)
                batch_num += 1


def _save_adv_batch(adv_img, lab, attack_name, classes, attack_image_dir, batch_num):
    """Save one batch of adversarial images, one PNG per image.

    Refactor note: this loop was previously duplicated (with a manual
    counter and an unused zip over labels) in both branches above.
    """
    for count, image in enumerate(adv_img):
        class_name = classes[1] if lab[count] else classes[0]
        save_image(image, os.path.join(
            attack_image_dir,
            f'Test_attacks_{attack_name}/{class_name}/' + str(batch_num) + "-" + str(count) + attack_name + ".png"))
def get_classifiers_list(MLP_path, num_classifiers=5):
    """Load up to ``num_classifiers`` saved MLP heads from a directory.

    Files are loaded in sorted-name order. BUG FIX: the original
    pre-allocated ``[0]*num_classifiers`` and indexed into it, so a
    directory with more files raised IndexError and one with fewer files
    returned a list padded with integer zeros (which crashes any caller
    that iterates the heads). Extra files are now ignored and the returned
    list contains only loaded models.

    Args:
        MLP_path: path of the downloaded MLPs directory.
        num_classifiers: maximum number of heads to load.

    Returns:
        List of loaded models, in eval mode, on CUDA.
    """
    classifiers_list = []
    for i, classif in enumerate(sorted(os.listdir(MLP_path))[:num_classifiers], start=1):
        classifiers_list.append(torch.load(os.path.join(MLP_path, classif)).eval().cuda())
        print(f'MLP {i} is loaded!')
    return classifiers_list
def frob_norm_kl_matrix(stacked_tesnor, num_classifiers=5):
    """Per-sample Frobenius norm of the pairwise KL-divergence matrix.

    For each sample, builds the (num_classifiers+1) x (num_classifiers+1)
    matrix of KL divergences between every pair of per-head probability
    vectors, and records the Frobenius norm of that matrix.

    Args:
        stacked_tesnor: tensor of shape (samples, num_classifiers+1, classes)
            holding probability vectors.
        num_classifiers: number of intermediate heads (ViT head adds one).

    Returns:
        List of one float per sample.
    """
    frob_values = []
    size = num_classifiers + 1
    for sample in stacked_tesnor:
        div_matrix = torch.zeros((size, size))
        for row in range(size):
            for col in range(size):
                # KL(sample[col] || sample[row]) in foolproof log space.
                div_matrix[row, col] = torch.nn.functional.kl_div(
                    sample[row].log(), sample[col].log(),
                    reduction='sum', log_target=True).item()
        frob_values.append(np.sqrt(torch.sum(torch.square(div_matrix)).item()))
    return frob_values
def roc(attack_name, frob_dict, threshold):
    """Compute ROC points over thresholds of the Frobenius-norm score.

    Clean samples are negatives, attack samples positives; a sample is
    flagged positive when its score is >= the threshold.

    Args:
        attack_name: key of the attack scores inside ``frob_dict``.
        frob_dict: dict with 'clean' and attack_name score tensors.
        threshold: iterable of thresholds to sweep.

    Returns:
        (tpr_list, fpr_list, threshold).
    """
    tpr_list = []
    fpr_list = []
    clean_scores = frob_dict['clean']
    attack_scores = frob_dict[attack_name]
    for thr in threshold:
        fp = sum(clean_scores >= thr).item()
        tn = sum(clean_scores < thr).item()
        tp = sum(attack_scores >= thr).item()
        fn = sum(attack_scores < thr).item()
        fpr_list.append(fp / (fp + tn))
        tpr_list.append(tp / (tp + fn))
    return tpr_list, fpr_list, threshold
def frobenius_norm(data_loader, model, mlps_list):
    """Score every sample by the Frobenius norm of its pairwise-KL matrix
    across the ViT head and all intermediate MLP heads.

    Args:
        data_loader: DataLoader yielding (images, labels); labels unused.
        model: ViT backbone (on CUDA).
        mlps_list: list of intermediate MLP heads, in block order.

    Returns:
        Flat list of one score per sample.
    """
    scores = []
    for images, _ in data_loader:
        images = images.cuda()
        per_head_probs = []
        # Full ViT prediction first.
        vit_probs = torch.softmax(model(images).detach().cpu(), dim=-1)
        per_head_probs.append(vit_probs.detach().cpu())
        # Then one probability vector per intermediate head, advancing the
        # token sequence block-by-block.
        tokens = model.pos_drop(model.patch_embed(images))
        for idx, head in enumerate(mlps_list):
            tokens = model.blocks[idx](tokens)
            head_probs = torch.softmax(head(tokens).detach().cpu(), dim=-1)
            per_head_probs.append(head_probs.detach().cpu())
        stacked = torch.stack(per_head_probs, dim=1)
        scores.extend(frob_norm_kl_matrix(stacked))
    return scores
def plot_roc (tpr_list, fpr_list, attack_name):
    """Plot the ROC curve for one attack and save it as '<attack>_ROC_Curve'."""
    plt.figure(figsize=(10,6))
    plt.plot(fpr_list,tpr_list, '-', label = attack_name)
    plt.title(f'ROC_{attack_name}_Attack')
    plt.legend(loc=4)  # lower-right corner
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.grid()
    plt.savefig(f'{attack_name}_ROC_Curve', bbox_inches='tight')
plt.show() | 6,033 | 37.433121 | 177 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/majority_voting.py | import os
import torch
import argparse
import numpy as np
from utils import *
from data.dataset import data_loader, data_loader_attacks
import mlp
def majority_voting(data_loader, model, mlps_list):
    """SEViT accuracy when the ViT head and intermediate MLP heads vote.

    Args:
        data_loader: loader of test samples (clean or adversarial).
        model: ViT model (on CUDA).
        mlps_list: list of intermediate MLP heads, in block order.

    Returns:
        Average per-batch accuracy of the majority vote.
    """
    total = 0.0
    for images, labels in data_loader:
        images = images.cuda()
        # First vote: the full ViT prediction.
        votes = [torch.argmax(model(images).detach().cpu(), dim=-1).detach().cpu()]
        # One vote per intermediate head, advancing the tokens block-by-block.
        tokens = model.pos_drop(model.patch_embed(images))
        for idx, head in enumerate(mlps_list):
            tokens = model.blocks[idx](tokens)
            votes.append(torch.argmax(head(tokens).detach().cpu(), dim=-1).detach().cpu())
        # Majority: one-hot each vote, sum across voters, take the argmax.
        stacked = torch.stack(votes, dim=1)
        preds_major = torch.argmax(torch.nn.functional.one_hot(stacked).sum(dim=1), dim=-1)
        total += (preds_major == labels).sum().item() / len(labels)
    final_acc = total / len(data_loader)
    print(f'Final Accuracy From Majority Voting = {(final_acc *100) :.3f}%' )
    return final_acc
# CLI: evaluate majority voting on clean or adversarial images.
parser = argparse.ArgumentParser(description='Majority Voting')
parser.add_argument('--images_type', type=str , choices=['clean', 'adversarial'],
                    help='Path to root directory of images')
parser.add_argument('--image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--vit_path', type=str ,
                    help='Path to the downloaded ViT model')
parser.add_argument('--mlp_path', type=str ,
                    help='Path to the downloaded MLPs folder')
parser.add_argument('--attack_name', type=str,
                    help='Attack name')
args = parser.parse_args()
# Load the ViT backbone and the intermediate MLP heads.
model = torch.load(args.vit_path).cuda()
model.eval()
print('ViT is loaded!')
MLPs_list = get_classifiers_list(MLP_path=args.mlp_path)
print('All MLPs are loaded!')
# Clean images use the 'test' split; adversarial images load as a flat folder.
if args.images_type == 'clean':
    loader_, dataset_ = data_loader(root_dir=args.image_folder_path, batch_size=15)
    majority_voting(data_loader=loader_['test'], model= model, mlps_list=MLPs_list)
else:
    loader_, dataset_ = data_loader_attacks(root_dir=args.image_folder_path, attack_name= args.attack_name, batch_size=15)
    majority_voting(data_loader=loader_, model= model, mlps_list=MLPs_list)
| 2,732 | 30.77907 | 122 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/finetuning_diffusion_model.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from diffusers import DDPMScheduler, UNet2DModel
from tqdm.auto import tqdm
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import os
from data.dataset import data_loader
import wandb
# Experiment tracking. NOTE(review): entity "mbzuai-" looks truncated — confirm.
wandb.init(project="ml-708", entity="mbzuai-")
# Prefer Apple MPS, then CUDA, then CPU.
device = (
    "mps"
    if torch.backends.mps.is_available()
    else "cuda"
    if torch.cuda.is_available()
    else "cpu"
)
# TB chest X-ray dataset; only the training split is used below.
root_dir = "data/TB_data"
loader_, dataset = data_loader(root_dir=root_dir, batch_size=8)
train_dataloader = loader_['train']
# UNet2DModel configured for 224x224 RGB noise prediction (same architecture
# as the diffusion.py training script).
net = UNet2DModel(
    sample_size=224, # the target image resolution
    in_channels=3, # the number of input channels, 3 for RGB images
    out_channels=3, # the number of output channels
    layers_per_block=2, # how many ResNet layers to use per UNet block
    act_fn="silu",
    add_attention=True,
    center_input_sample=False,
    downsample_padding=0,
    flip_sin_to_cos=False,
    freq_shift=1,
    mid_block_scale_factor=1,
    norm_eps=1e-06,
    norm_num_groups=32,
    time_embedding_type="positional",
    block_out_channels=(128,
        128,
        256,
        256,
        512,
        512), # Roughly matching our basic unet example
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",
        "DownBlock2D"
    ),
    up_block_types=(
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D" # a regular ResNet upsampling block
    ),
)
net.to(device)
def train(train_dataloader, epoch_st, epoch_end, lr=1e-4):
    """Train the module-level ``net`` UNet as a DDPM noise predictor.

    Standard epsilon-prediction objective with gradient accumulation; a
    checkpoint is saved after every epoch under saved_model_scratch/.

    Args:
        train_dataloader: loader yielding (images, labels) batches.
        epoch_st: first epoch index (inclusive).
        epoch_end: last epoch index (exclusive).
        lr: AdamW learning rate.

    Returns:
        The trained DDPMPipeline.
    """
    scheduler = DDIMScheduler(beta_end=0.02,beta_schedule="linear",beta_start=0.0001, clip_sample=True, num_train_timesteps=1000, prediction_type="epsilon")
    image_pipe = DDPMPipeline(net,scheduler=scheduler)
    image_pipe.to(device)
    grad_accumulation_steps = 2
    optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr)
    losses = []
    for epoch in range(epoch_st,epoch_end):
        for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
            images, labels = batch
            clean_images = images.to(device)
            # Sample noise to add to the images
            noise = torch.randn(clean_images.shape).to(clean_images.device)
            bs = clean_images.shape[0]
            # Sample a random timestep for each image
            timesteps = torch.randint(
                0,
                image_pipe.scheduler.num_train_timesteps,
                (bs,),
                device=clean_images.device,
            ).long()
            # Forward diffusion: add noise at the sampled timesteps.
            noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)
            # Predict the added noise (eps), not the denoised image.
            noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]
            loss = F.mse_loss(noise_pred, noise)
            # Store for later plotting
            losses.append(loss.item())
            # BUG FIX: was loss.backward(loss) — passing the scalar loss as
            # the `gradient` argument scales all gradients by the loss value.
            loss.backward()
            # Gradient accumulation:
            if (step + 1) % grad_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
        print(
            f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}"
        )
        image_pipe.save_pretrained(f"saved_model_scratch/my-model_{epoch}")
    return image_pipe
model = train(train_dataloader=train_dataloader,epoch_st=0,epoch_end=200) | 4,351 | 30.766423 | 156 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/generate_attacks.py | import torch
import argparse
from attack import Attack
from utils import *
from data.dataset import data_loader
from mlp import Big_model
# CLI: generate and save adversarial images from the SEViT ensemble model.
parser = argparse.ArgumentParser(description='Generate Attack from ViT')
parser.add_argument('--epsilons', type=float ,
                    help='Perturbations Size')
parser.add_argument('--attack_list', type=str , nargs='+',
                    help='Attack List to Generate')
parser.add_argument('--vit_path', type=str ,
                    help='pass the path for the downloaded MLPs folder')
parser.add_argument('--attack_images_dir', type=str ,
                    help='Directory to save the generated attacks')
args = parser.parse_args()
# Clean TB test data from which the attacks are generated.
root_dir = "./data/TB_data"
loader_, dataset_ = data_loader(root_dir=root_dir)
""" model = torch.load(args.vit_path).cuda()
model.eval() """
device = torch.device("cuda")
# Ensemble (ViT + intermediate MLP heads) is attacked instead of the raw ViT.
model_mlp = Big_model()
model_mlp.to(device)
model_mlp.eval()
#Generate and save attacks
generate_save_attacks(
    attack_names= args.attack_list,
    model= model_mlp,
    samples= loader_['test'],
    classes= ['Normal', 'Tuberculosis'],
    attack_image_dir= args.attack_images_dir,
    epsilon=args.epsilons,
)
| 1,179 | 28.5 | 72 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/adversarial_detection.py | import torch
import numpy as np
from utils import *
import argparse
from data.dataset import data_loader, data_loader_attacks
# CLI: adversarial detection via the Frobenius norm of pairwise-KL matrices,
# summarized as a ROC curve for one attack.
parser = argparse.ArgumentParser(description='ROC For Attack')
parser.add_argument('--clean_image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--attack_image_folder_path', type=str ,
                    help='Path to root directory of images')
parser.add_argument('--vit_path', type=str ,
                    help='Path to the downloaded ViT model')
parser.add_argument('--mlp_path', type=str ,
                    help='Path to the downloaded MLPs folder')
parser.add_argument('--attack_name', type=str,
                    help='Attack name')
args = parser.parse_args()
# Load models
model = torch.load(args.vit_path).cuda()
model.eval()
print('ViT is loaded!')
# Load MLPs
MLPs_list = get_classifiers_list(MLP_path=args.mlp_path)
print('All MLPs are loaded!')
# Load images (clean and attack)
batch_size = 10
clean_loader_, _= data_loader(root_dir=args.clean_image_folder_path, batch_size=batch_size)
attack_loader_, _= data_loader_attacks(root_dir=args.attack_image_folder_path, attack_name= args.attack_name, batch_size=batch_size)
print('Clean test samples and corresponding adversarial samples are loaded')
# Score every sample with the Frobenius norm of its pairwise-KL matrix.
frob_list_clean = frobenius_norm(data_loader=clean_loader_['test'], model=model, mlps_list= MLPs_list)
frob_list_attack = frobenius_norm(data_loader=attack_loader_, model=model, mlps_list= MLPs_list)
print('Frobenuis norm has been calculated')
frob_dict = {'clean': torch.tensor(frob_list_clean), args.attack_name:torch.tensor(frob_list_attack)}
# Sweep thresholds to get TPR/FPR points.
tpr_list, fpr_list, threshold = roc(attack_name= args.attack_name, frob_dict= frob_dict, threshold= np.arange(0,90,0.1))
# Plot ROC
plot_roc(tpr_list= tpr_list, fpr_list= fpr_list, attack_name= args.attack_name)
print('ROC figure has been saved in the current directory!') | 1,975 | 36.283019 | 132 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/models/mlp.py | import torch.nn as nn
import torch
class Classifier(nn.Module):
    """Four-layer MLP head over flattened ViT patch tokens.

    Args:
        num_classes: number of output classes.
        in_features: flattened input dimension (196 patches x 768 channels).

    The forward pass returns raw logits of shape (batch, num_classes).
    """

    def __init__(self, num_classes=2, in_features=768*196):
        super().__init__()
        # Layers created in the original order for reproducible seeded init.
        self.linear1 = nn.Linear(in_features=in_features, out_features=4096)
        self.linear2 = nn.Linear(in_features=4096, out_features=2048)
        self.linear3 = nn.Linear(in_features=2048, out_features=128)
        self.linear4 = nn.Linear(in_features=128, out_features=num_classes)
        # NOTE(review): dropout is constructed but never applied in forward —
        # confirm whether that is intentional before relying on it.
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        flat = x.reshape(-1, 196*768)
        hidden = nn.functional.relu(self.linear1(flat))
        hidden = nn.functional.relu(self.linear2(hidden))
        hidden = nn.functional.relu(self.linear3(hidden))
        return self.linear4(hidden)
Defensive_Diffusion-testing | Defensive_Diffusion-testing/data/dataset.py | import os
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
# DataLoader and Dataset (Clean Samples)
def data_loader( root_dir, image_size = (224,224), batch_size= 15, train_dir = 'training',test_dir = 'testing', vald_dir = 'validation'):
    """Build ImageFolder datasets and DataLoaders for the clean-image splits.

    Args:
        root_dir: root directory of the (downloaded) dataset.
        image_size: target (height, width) after resizing.
        batch_size: batch size used for every split.
        train_dir / test_dir / vald_dir: split sub-directory names under root_dir.

    Returns:
        (data_loaders, image_dataset): two dicts keyed by 'train'/'valid'/'test'.
    """
    split_dirs = {
        'train': os.path.join(root_dir, train_dir),
        'valid': os.path.join(root_dir, vald_dir),
        'test': os.path.join(root_dir, test_dir),
    }
    # Every split uses the same deterministic resize -> tensor pipeline.
    data_transform = {
        split: transforms.Compose([
            transforms.Resize(image_size),
            transforms.ToTensor(),
        ])
        for split in ('train', 'valid', 'test')
    }
    image_dataset = {
        split: ImageFolder(split_dirs[split], transform=data_transform[split])
        for split in ('train', 'valid', 'test')
    }
    # The training loader shuffles; eval loaders keep order and drop ragged
    # final batches so every batch has the full batch_size.
    data_loaders = {
        'train': DataLoader(image_dataset['train'], batch_size=batch_size,
                            shuffle=True, num_workers=12),
        'test': DataLoader(image_dataset['test'], batch_size=batch_size,
                           shuffle=False, num_workers=12, drop_last=True),
        'valid': DataLoader(image_dataset['valid'], batch_size=batch_size,
                            shuffle=False, num_workers=12, drop_last=True),
    }
    dataset_size = {split: len(image_dataset[split]) for split in ('train', 'valid', 'test')}
    print([f'number of {i} images is {dataset_size[i]}' for i in (dataset_size)])
    class_idx = image_dataset['test'].class_to_idx
    print(f'Classes with index are: {class_idx}')
    class_names = image_dataset['test'].classes
    print(class_names)
    return data_loaders, image_dataset
#Dataloader and Dataset (Adversarial Samples)
def data_loader_attacks(root_dir, attack_name ,image_size = (224,224), batch_size = 30):
"""
Class to create Dataset and DataLoader from Image folder for adversarial samples generated.
Args:
root _dir: root directory of generated adversarial samples.
attack_name: attack name that has folder in root_dir.
image_size : size of the image after resize (224,224)
batch_size
return:
dataloader : dataloader for the attack
dataset : dataset for attack
"""
# Adversarial images are expected under "<root_dir>/Test_attacks_<attack_name>".
dirs = os.path.join(root_dir, f'Test_attacks_{attack_name}')
data_transform = transforms.Compose([transforms.Resize(image_size),
transforms.ToTensor()]
)
image_dataset = ImageFolder(dirs, transform= data_transform)
# shuffle=False keeps adversarial samples aligned with their clean originals;
# drop_last=True guarantees every batch has exactly batch_size elements.
data_loaders =DataLoader(image_dataset, batch_size= batch_size,
shuffle=False, num_workers=8, drop_last=True)
print (f'number of images is {len(image_dataset)}')
class_idx= image_dataset.class_to_idx
print (f'Classes with index are: {class_idx}')
return data_loaders, image_dataset | 4,070 | 36.694444 | 139 | py |
stylegan-encoder | stylegan-encoder-master/train_effnet.py | """
Trains a modified EfficientNet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import pickle
import cv2
import argparse
import dnnlib
import config
import dnnlib.tflib as tflib
import tensorflow
import keras.backend as K
from efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, preprocess_input
from keras.layers import Input, LocallyConnected1D, Reshape, Permute, Conv2D, Add, Concatenate
from keras.models import Model, load_model
"""
Truncation method from @oneiroid
"""
def truncate_fancy(dlat, dlat_avg, model_scale=18, truncation_psi=0.7, minlayer=0, maxlayer=8, do_clip=False):
    """Per-layer truncation: blend dlat toward dlat_avg by truncation_psi for
    layers in [minlayer, maxlayer); all other layers keep the full dlatent.

    With do_clip=True the clipped lerp is built and evaluated via tflib.
    """
    # Coefficient tensor of shape (1, model_scale, 1): psi for truncated
    # layers, 1.0 (i.e. no truncation) everywhere else.
    coefs = np.full((1, model_scale, 1), truncation_psi, dtype=np.float32)
    coefs[0, maxlayer:, 0] = 1.0
    if minlayer > 0:
        # Layers below minlayer are also left untruncated.
        coefs[0, :minlayer, 0] = 1.0
    if do_clip:
        return tflib.lerp_clip(dlat_avg, dlat, coefs).eval()
    return tflib.lerp(dlat_avg, dlat, coefs)
def truncate_normal(dlat, dlat_avg, truncation_psi=0.7):
    """Standard truncation trick: move dlat toward dlat_avg by factor truncation_psi."""
    offset = dlat - dlat_avg
    return dlat_avg + truncation_psi * offset
def generate_dataset_main(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=32, truncation=0.7, fancy_truncation=False):
"""
Generates a dataset of 'n' images of shape ('size', 'size', 3) with random seed 'seed'
along with their dlatent vectors W of shape ('n', 512)
These datasets can serve to train an inverse mapping from X to W as well as explore the latent space
More variation added to latents; also, negative truncation added to balance these examples.
"""
n = n // 2 # this gets doubled because of negative truncation below
model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
Gs = load_Gs()
# mod_l controls how many independent Z rows feed each example's dlatent.
if (model_scale % 3 == 0):
mod_l = 3
else:
mod_l = 2
# Seeded RNG for reproducibility; 'b' randomly selects an alternative
# (coarser) latent split below for extra variation.
if seed is not None:
b = bool(np.random.RandomState(seed).randint(2))
Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
else:
b = bool(np.random.randint(2))
Z = np.random.randn(n*mod_l, Gs.input_shape[1])
if b:
mod_l = model_scale // 2
mod_r = model_scale // mod_l
if seed is not None:
Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
else:
Z = np.random.randn(n*mod_l, Gs.input_shape[1])
W = Gs.components.mapping.run(Z, None, minibatch_size=minibatch_size) # Use mapping network to get unique dlatents for more variation.
dlatent_avg = Gs.get_var('dlatent_avg') # [component]
# Apply truncation at +truncation and -truncation and concatenate, doubling
# the example count back to the requested n.
if fancy_truncation:
W = np.append(truncate_fancy(W, dlatent_avg, model_scale, truncation), truncate_fancy(W, dlatent_avg, model_scale, -truncation), axis=0)
else:
W = np.append(truncate_normal(W, dlatent_avg, truncation), truncate_normal(W, dlatent_avg, -truncation), axis=0)
W = W[:, :mod_r]
W = W.reshape((n*2, model_scale, 512))
X = Gs.components.synthesis.run(W, randomize_noise=False, minibatch_size=minibatch_size, print_progress=True,
output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
# Downscale synthesized images to the training resolution and apply the
# EfficientNet preprocessing.
X = np.array([cv2.resize(x, (image_size, image_size), interpolation = cv2.INTER_AREA) for x in X])
X = preprocess_input(X)
return W, X
def generate_dataset(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7, fancy_truncation=False):
    """Generate (W, X) training pairs in chunks to bound peak memory.

    Splits the request into 16 chunks, delegating each chunk to
    generate_dataset_main(), and concatenates the results. If save_path is
    given, the arrays are also written as W_<seed>_<n>.npy / X_<seed>_<n>.npy.

    Returns:
        W: dlatents of shape (n, model_scale, 512).
        X: preprocessed images of shape (n, image_size, image_size, 3).
    """
    batch_size = 16  # number of chunks, not examples per chunk
    inc = n // batch_size
    left = n - ((batch_size - 1) * inc)  # remainder so chunk sizes sum to n
    W, X = generate_dataset_main(inc, save_path, seed, model_res, image_size, minibatch_size, truncation, fancy_truncation)
    for i in range(batch_size - 2):
        aW, aX = generate_dataset_main(inc, save_path, seed, model_res, image_size, minibatch_size, truncation, fancy_truncation)
        W = np.append(W, aW, axis=0)
        aW = None  # drop the chunk reference before the next big allocation
        X = np.append(X, aX, axis=0)
        aX = None
    aW, aX = generate_dataset_main(left, save_path, seed, model_res, image_size, minibatch_size, truncation, fancy_truncation)
    W = np.append(W, aW, axis=0)
    aW = None
    X = np.append(X, aX, axis=0)
    aX = None
    if save_path is not None:
        prefix = '_{}_{}'.format(seed, n)
        # Fixed: the previous code wrapped os.path.join() around a single
        # already-joined argument; one join is sufficient.
        np.save(os.path.join(save_path, 'W' + prefix), W)
        np.save(os.path.join(save_path, 'X' + prefix), X)
    return W, X
def is_square(n):
    """Return True if integer n is a perfect square.

    Uses math.isqrt so the test stays exact for arbitrarily large integers
    (the previous float-based math.sqrt check loses precision past 2**53).
    Like the original, raises ValueError for n < 0.
    """
    return n == math.isqrt(n) ** 2
def get_effnet_model(save_path, model_res=1024, image_size=256, depth=1, size=3, activation='elu', loss='logcosh', optimizer='adam'):
"""Load a saved EfficientNet->dlatent model from save_path, or build a new one.

The network maps an (image_size, image_size, 3) image to a
(model_scale, 512) dlatent tensor, using an EfficientNet backbone (B0-B3
chosen by 'size') followed by TreeConnect-style locally connected layers.
"""
if os.path.exists(save_path):
print('Loading model')
return load_model(save_path)
# Build model
print('Building model')
model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
# Pick the EfficientNet backbone variant by 'size' (clamped to B0..B3).
if (size <= 0):
effnet = EfficientNetB0(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
if (size == 1):
effnet = EfficientNetB1(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
if (size == 2):
effnet = EfficientNetB2(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
if (size >= 3):
effnet = EfficientNetB3(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
# Factor layer_size into a (layer_r, layer_l) 2D shape for the
# TreeConnect layers; prefer a square, else the nearest power-of-two split.
layer_size = model_scale*8*8*8
if is_square(layer_size): # work out layer dimensions
layer_l = int(math.sqrt(layer_size)+0.5)
layer_r = layer_l
else:
layer_m = math.log(math.sqrt(layer_size),2)
layer_l = 2**math.ceil(layer_m)
layer_r = layer_size // layer_l
layer_l = int(layer_l)
layer_r = int(layer_r)
x_init = None
inp = Input(shape=(image_size, image_size, 3))
x = effnet(inp)
if (size < 1):
x = Conv2D(model_scale*8, 1, activation=activation)(x) # scale down
if (depth > 0):
x = Reshape((layer_r, layer_l))(x) # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
else:
if (depth < 1):
depth = 1
if (size <= 2):
x = Conv2D(model_scale*8*4, 1, activation=activation)(x) # scale down a bit
x = Reshape((layer_r*2, layer_l*2))(x) # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
else:
x = Reshape((384,256))(x) # full size for B3
# Each depth iteration: row-wise then column-wise locally connected layers
# (via Permute), with residual skip connections between iterations.
while (depth > 0):
x = LocallyConnected1D(layer_r, 1, activation=activation)(x)
x = Permute((2, 1))(x)
x = LocallyConnected1D(layer_l, 1, activation=activation)(x)
x = Permute((2, 1))(x)
if x_init is not None:
x = Add()([x, x_init]) # add skip connection
x_init = x
depth-=1
if (size >= 2): # add unshared layers at end for different sections of the latent space
x_init = x
# Split into 3 parallel branches when divisible by 3, else 2 branches.
if layer_r % 3 == 0 and layer_l % 3 == 0:
a = LocallyConnected1D(layer_r, 1, activation=activation)(x)
b = LocallyConnected1D(layer_r, 1, activation=activation)(x)
c = LocallyConnected1D(layer_r, 1, activation=activation)(x)
a = Permute((2, 1))(a)
b = Permute((2, 1))(b)
c = Permute((2, 1))(c)
a = LocallyConnected1D(layer_l//3, 1, activation=activation)(a)
b = LocallyConnected1D(layer_l//3, 1, activation=activation)(b)
c = LocallyConnected1D(layer_l//3, 1, activation=activation)(c)
x = Concatenate()([a,b,c])
else:
a = LocallyConnected1D(layer_l, 1, activation=activation)(x)
b = LocallyConnected1D(layer_l, 1, activation=activation)(x)
a = Permute((2, 1))(a)
b = Permute((2, 1))(b)
a = LocallyConnected1D(layer_r//2, 1, activation=activation)(a)
b = LocallyConnected1D(layer_r//2, 1, activation=activation)(b)
x = Concatenate()([a,b])
x = Add()([x, x_init]) # add skip connection
x = Reshape((model_scale, 512))(x) # train against all dlatent values
model = Model(inputs=inp,outputs=x)
model.compile(loss=loss, metrics=[], optimizer=optimizer) # By default: adam optimizer, logcosh used for loss.
return model
def finetune_effnet(model, args):
"""
Finetunes an EfficientNet to predict W from X
Generate batches (X, W) of size 'batch_size', iterates 'n_epochs', and repeat while 'max_patience' is reached
on the test set. The model is saved every time a new best test loss is reached.
"""
# Unpack CLI options into locals for readability below.
save_path = args.model_path
model_res=args.model_res
image_size=args.image_size
batch_size=args.batch_size
test_size=args.test_size
max_patience=args.max_patience
n_epochs=args.epochs
seed=args.seed
minibatch_size=args.minibatch_size
truncation=args.truncation
fancy_truncation=args.fancy_truncation
use_ktrain=args.use_ktrain
ktrain_max_lr=args.ktrain_max_lr
ktrain_reduce_lr=args.ktrain_reduce_lr
ktrain_stop_early=args.ktrain_stop_early
# EfficientNet backbones require at least 224x224 inputs.
assert image_size >= 224
# Create a test set
np.random.seed(seed)
print('Creating test set:')
W_test, X_test = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
# Iterate on batches of size batch_size
print('Generating training set:')
patience = 0
epoch = -1
best_loss = np.inf
#loss = model.evaluate(X_test, W_test)
#print('Initial test loss : {:.5f}'.format(loss))
# Keep training on freshly generated data until the test loss stops
# improving for max_patience consecutive rounds.
while (patience <= max_patience):
W_train = X_train = None
W_train, X_train = generate_dataset(batch_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
if use_ktrain:
print('Creating validation set:')
W_val, X_val = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation, fancy_truncation=fancy_truncation)
learner = ktrain.get_learner(model=model,
train_data=(X_train, W_train), val_data=(X_val, W_val),
workers=1, use_multiprocessing=False,
batch_size=minibatch_size)
#learner.lr_find() # simulate training to find good learning rate
#learner.lr_plot() # visually identify best learning rate
learner.autofit(ktrain_max_lr, checkpoint_folder='/tmp', reduce_on_plateau=ktrain_reduce_lr, early_stopping=ktrain_stop_early)
learner = None
print('Done with current validation set.')
# NOTE(review): after the ktrain run the model is also fit on the
# validation data itself (it is then discarded) -- confirm intended.
model.fit(X_val, W_val, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
else:
model.fit(X_train, W_train, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
loss = model.evaluate(X_test, W_test, batch_size=minibatch_size)
if loss < best_loss:
print('New best test loss : {:.5f}'.format(loss))
patience = 0
best_loss = loss
else:
print('Test loss : {:.5f}'.format(loss))
patience += 1
if (patience > max_patience): # When done with test set, train with it and discard.
print('Done with current test set.')
model.fit(X_test, W_test, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
print('Saving model.')
model.save(save_path)
# --- CLI definition and top-level training driver ---
parser = argparse.ArgumentParser(description='Train an EfficientNet to predict latent representations of images in a StyleGAN model from generated examples', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL')
parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
parser.add_argument('--data_dir', default='data', help='Directory for storing the EfficientNet model')
parser.add_argument('--model_path', default='data/finetuned_effnet.h5', help='Save / load / create the EfficientNet model with this file path')
parser.add_argument('--model_depth', default=1, help='Number of TreeConnect layers to add after EfficientNet', type=int)
parser.add_argument('--model_size', default=1, help='Model size - 0 - small, 1 - medium, 2 - large, or 3 - full size.', type=int)
# NOTE(review): type=bool on argparse flags is a known pitfall -- any
# non-empty string (including "False") parses as True.
parser.add_argument('--use_ktrain', default=False, help='Use ktrain for training', type=bool)
parser.add_argument('--ktrain_max_lr', default=0.001, help='Maximum learning rate for ktrain', type=float)
parser.add_argument('--ktrain_reduce_lr', default=1, help='Patience for reducing learning rate after a plateau for ktrain', type=float)
parser.add_argument('--ktrain_stop_early', default=3, help='Patience for early stopping for ktrain', type=float)
parser.add_argument('--activation', default='elu', help='Activation function to use after EfficientNet')
parser.add_argument('--optimizer', default='adam', help='Optimizer to use')
parser.add_argument('--loss', default='logcosh', help='Loss function to use')
parser.add_argument('--use_fp16', default=False, help='Use 16-bit floating point', type=bool)
parser.add_argument('--image_size', default=256, help='Size of images for EfficientNet model', type=int)
parser.add_argument('--batch_size', default=2048, help='Batch size for training the EfficientNet model', type=int)
parser.add_argument('--test_size', default=512, help='Batch size for testing the EfficientNet model', type=int)
parser.add_argument('--truncation', default=0.7, help='Generate images using truncation trick', type=float)
# NOTE(review): default=True with type=float looks inconsistent for what is
# conceptually a boolean flag -- confirm intended behavior.
parser.add_argument('--fancy_truncation', default=True, help='Use fancier truncation proposed by @oneiroid', type=float)
parser.add_argument('--max_patience', default=2, help='Number of iterations to wait while test loss does not improve', type=int)
parser.add_argument('--freeze_first', default=False, help='Start training with the pre-trained network frozen, then unfreeze', type=bool)
parser.add_argument('--epochs', default=2, help='Number of training epochs to run for each batch', type=int)
parser.add_argument('--minibatch_size', default=16, help='Size of minibatches for training and generation', type=int)
parser.add_argument('--seed', default=-1, help='Pick a random seed for reproducibility (-1 for no random seed selected)', type=int)
parser.add_argument('--loop', default=-1, help='Run this many iterations (-1 for infinite, halt with CTRL-C)', type=int)
args, other_args = parser.parse_known_args()
os.makedirs(args.data_dir, exist_ok=True)
# seed == -1 means "no fixed seed".
if args.seed == -1:
args.seed = None
if args.use_fp16:
K.set_floatx('float16')
K.set_epsilon(1e-4)
if args.use_ktrain:
import ktrain
tflib.init_tf()
model = get_effnet_model(args.model_path, model_res=args.model_res, depth=args.model_depth, size=args.model_size, activation=args.activation, optimizer=args.optimizer, loss=args.loss)
# Download (or load from cache) the StyleGAN pickle; only the long-term
# average generator Gs_network is used for data generation.
with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
generator_network, discriminator_network, Gs_network = pickle.load(f)
def load_Gs():
return Gs_network
#K.get_session().run(tensorflow.global_variables_initializer())
if args.freeze_first:
model.layers[1].trainable = False
model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
model.summary()
if args.freeze_first: # run a training iteration first while pretrained model is frozen, then unfreeze.
finetune_effnet(model, args)
model.layers[1].trainable = True
model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
model.summary()
# loop < 0 runs forever (stop with CTRL-C); otherwise run 'loop' rounds.
if args.loop < 0:
while True:
finetune_effnet(model, args)
else:
count = args.loop
while count > 0:
finetune_effnet(model, args)
count -= 1
| 16,451 | 47.674556 | 213 | py |
stylegan-encoder | stylegan-encoder-master/pretrained_example.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Minimal script for generating an image using pre-trained StyleGAN generator."""
import os
import pickle
import numpy as np
import PIL.Image
from PIL import Image
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
rnd = np.random.RandomState(5)
num_classes = 10
def main():
    """Generate one FFHQ sample with the pre-trained StyleGAN and save it as a PNG."""
    tflib.init_tf()
    # karras2019stylegan-ffhq-1024x1024.pkl
    network_url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'
    with dnnlib.util.open_url(network_url, cache_dir=config.cache_dir) as f:
        # _G: generator snapshot, _D: discriminator snapshot,
        # Gs: long-term average generator (highest-quality output).
        _G, _D, Gs = pickle.load(f)
    Gs.print_layers()
    # Draw a single latent from the module-level RNG and synthesize its image.
    z = rnd.randn(1, Gs.input_shape[1])
    images = Gs.run(z, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
    # Write the result under the configured results directory.
    os.makedirs(config.result_dir, exist_ok=True)
    out_path = os.path.join(config.result_dir, 'example.png')
    PIL.Image.fromarray(images[0], 'RGB').save(out_path)
def main_conditional():
"""Generate class-conditional samples from a CIFAR-10 StyleGAN snapshot.

For each of 20 seeds, synthesizes one image per class (identity-matrix
conditioning) and saves the stacked result as a single PNG.
"""
# Initialize TensorFlow
tflib.init_tf()
# Load pre-trained network
dir = 'results/00004-sgan-cifar10-1gpu-cond/'
fn = 'network-snapshot-010372.pkl'
_G, _D, Gs = pickle.load(open(os.path.join(dir,fn), 'rb'))
# Print network details
Gs.print_layers()
# rnd = np.random.RandomState(10)
# Initialize conditioning
# One-hot rows: row k conditions on class k.
conditioning = np.eye(num_classes)
for i, rnd in enumerate([np.random.RandomState(i) for i in np.arange(20)]):
# Pick latent vector.
latents = rnd.randn(num_classes, Gs.input_shape[1])
# Generate image.
images = Gs.run(latents, conditioning, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
# Stack the 10 images vertically into one strip.
# NOTE(review): assumes 32x32 RGB outputs (CIFAR-10) -- confirm snapshot resolution.
images = images.reshape(32*10, 32, 3)
# Save image.
png_filename = os.path.join(dir, 'example_{}.png'.format(i))
PIL.Image.fromarray(images, 'RGB').save(png_filename)
def main_binary():
    """Generate CelebA-HQ samples conditioned on a fixed binary attribute vector.

    For each of 20 seeds, synthesizes one 256x256 image conditioned on the
    `classes` attribute vector below and saves it under <out_dir>/examples/.
    """
    # Initialize Tensorflow
    tflib.init_tf()
    # Load pre-trained network. (An earlier snapshot lived in
    # 'results/00005-sgancelebahq-binary-1gpu-cond-wgangp/'; the first
    # assignment to it was dead code and has been removed.)
    out_dir = 'results/00006-sgancelebahq-binary-1gpu-cond-wgangp/'
    fn = 'network-snapshot-006926.pkl'
    # Use a context manager so the pickle file handle is closed (was leaked).
    with open(os.path.join(out_dir, fn), 'rb') as f:
        _, _, Gs = pickle.load(f)
    # Print network details
    Gs.print_layers()
    # Create binary attributes (1 = attribute present).
    # eyeglasses, male, black_hair, smiling, young
    classes = {
        '5_o_Clock_Shadow': 0,
        'Arched_Eyebrows': 0,
        'Attractive': 1,
        'Bags_Under_Eyes': 0,
        'Bald': 0,
        'Bangs': 0,
        'Big_Lips': 0,
        'Big_Nose': 0,
        'Black_Hair': 0,
        'Blond_Hair': 0,
        'Blurry': 0,
        'Brown_Hair': 1,
        'Bushy_Eyebrows': 0,
        'Chubby': 0,
        'Double_Chin': 0,
        'Eyeglasses': 0,
        'Goatee': 0,
        'Gray_Hair': 0,
        'Heavy_Makeup': 1,
        'High_Cheekbones': 1,
        'Male': 0,
        'Mouth_Slightly_Open': 1,
        'Mustache': 0,
        'Narrow_Eyes': 0,
        'No_Beard': 0,
        'Oval_Face': 1,
        'Pale_Skin': 0,
        'Pointy_Nose': 0,
        'Receding_Hairline': 0,
        'Rosy_Cheeks': 0,
        'Sideburns': 0,
        'Smiling': 0,
        'Straight_Hair': 0,
        'Wavy_Hair': 1,
        'Wearing_Earrings': 0,
        'Wearing_Hat': 0,
        'Wearing_Lipstick': 1,
        'Wearing_Necklace': 0,
        'Wearing_Necktie': 0,
        'Young': 1
    }
    # Echo the active attributes for the log.
    print([attr for (attr, key) in classes.items() if key == 1])
    binary = np.array(list(classes.values())).reshape(1, -1)
    for i, rnd in enumerate([np.random.RandomState(i) for i in np.arange(20)]):
        latent = rnd.randn(1, Gs.input_shape[1])
        image = Gs.run(latent, binary, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
        image = image.reshape(256, 256, 3)
        png_filename = os.path.join(out_dir, 'examples/example{}.png'.format(i))
        PIL.Image.fromarray(image, 'RGB').save(png_filename)
def main_textual():
"""Generate text-conditioned COCO samples from precomputed caption embeddings.

Loads one embedding per image (every 5th row, matching the attnGAN setup)
and saves one 256x256 sample per embedding, named after its source file.
"""
# Initialize Tensorflow
tflib.init_tf()
dir = 'results/00015-sgancoco_train-1gpu-cond'
fn = 'network-snapshot-025000.pkl'
# NOTE(review): file handle from open() is never closed -- minor leak.
_, _, Gs = pickle.load(open(os.path.join(dir,fn), 'rb'))
# Print network details
Gs.print_layers()
embeddings = np.load('datasets/coco_test/coco_test-rxx.labels')
fns=np.load('datasets/coco_test/fns.npy')
# Use only 1 description (instead of all 5, to compare to attnGAN)
embeddings = embeddings[0::5]
fns = fns[0::5]
for i, rnd in enumerate([np.random.RandomState(i) for i in np.arange(embeddings.shape[0])]):
latent = rnd.randn(1, Gs.input_shape[1])
emb = embeddings[i].reshape(1,-1)
image = Gs.run(latent, emb, truncation_psi=0.8, randomize_noise=True, output_transform=fmt)
# NOTE(review): assumes 256x256 RGB output from this snapshot -- confirm.
image = image.reshape(256,256,3)
png_filename = os.path.join(dir, 'examples/{}.png'.format(fns[i]))
image = Image.fromarray(image)
image.save(png_filename)
# Script entry point: only the text-conditional demo is active; the other
# demos remain available by uncommenting the calls below.
if __name__ == "__main__":
# main()
# main_conditional()
# main_binary()
main_textual()
| 6,422 | 30.79703 | 116 | py |
stylegan-encoder | stylegan-encoder-master/generate_figures.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Minimal script for reproducing the figures of the StyleGAN paper using pre-trained generators."""
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
#----------------------------------------------------------------------------
# Helpers for loading and using pre-trained generators.
url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl
url_bedrooms = 'https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF' # karras2019stylegan-bedrooms-256x256.pkl
url_cars = 'https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3' # karras2019stylegan-cars-512x384.pkl
url_cats = 'https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ' # karras2019stylegan-cats-256x256.pkl
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8)
_Gs_cache = dict()
def load_Gs(url):
    """Return the long-term-average generator for *url*, memoized in _Gs_cache."""
    cached = _Gs_cache.get(url)
    if cached is None:
        # First request for this URL: download (or read cached file) and unpickle.
        with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
            _G, _D, Gs = pickle.load(f)
        _Gs_cache[url] = Gs
        cached = Gs
    return cached
#----------------------------------------------------------------------------
# Figures 2, 3, 10, 11, 12: Multi-resolution grid of uncurated result images.
def draw_uncurated_result_figure(png, Gs, cx, cy, cw, ch, rows, lods, seed):
"""Figures 2/3/10/11/12: multi-resolution grid of uncurated samples.

Each entry of 'lods' is a column whose images are downscaled by 2**lod;
(cx, cy, cw, ch) is the crop taken from every full-resolution image.
"""
print(png)
# One latent per grid cell: column at level-of-detail 'lod' holds rows*2**lod images.
latents = np.random.RandomState(seed).randn(sum(rows * 2**lod for lod in lods), Gs.input_shape[1])
images = Gs.run(latents, None, **synthesis_kwargs) # [seed, y, x, rgb]
canvas = PIL.Image.new('RGB', (sum(cw // 2**lod for lod in lods), ch * rows), 'white')
image_iter = iter(list(images))
for col, lod in enumerate(lods):
for row in range(rows * 2**lod):
image = PIL.Image.fromarray(next(image_iter), 'RGB')
image = image.crop((cx, cy, cx + cw, cy + ch))
# NOTE(review): PIL.Image.ANTIALIAS is deprecated and removed in
# Pillow 10 (use Image.LANCZOS) -- needs updating for new Pillow.
image = image.resize((cw // 2**lod, ch // 2**lod), PIL.Image.ANTIALIAS)
canvas.paste(image, (sum(cw // 2**lod for lod in lods[:col]), row * ch // 2**lod))
canvas.save(png)
#----------------------------------------------------------------------------
# Figure 3: Style mixing.
def draw_style_mixing_figure(png, Gs, w, h, src_seeds, dst_seeds, style_ranges):
    """Figure 3: style-mixing grid.

    The top row / left column show the source and destination images; each
    interior cell re-synthesizes the destination dlatent with the layers in
    style_ranges[row] replaced by the corresponding source's dlatent layers.
    """
    print(png)
    # Fixed: np.stack requires a sequence of arrays -- passing a bare generator
    # is deprecated (and a TypeError on recent NumPy), so build explicit lists.
    src_latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds])
    dst_latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds])
    src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
    dst_dlatents = Gs.components.mapping.run(dst_latents, None) # [seed, layer, component]
    src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
    dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs)
    canvas = PIL.Image.new('RGB', (w * (len(src_seeds) + 1), h * (len(dst_seeds) + 1)), 'white')
    for col, src_image in enumerate(list(src_images)):
        canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0))
    for row, dst_image in enumerate(list(dst_images)):
        canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h))
        # Copy this destination dlatent across all columns, then overwrite the
        # selected layer range with each source's dlatents (the "mixing").
        row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds))
        row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]]
        row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs)
        for col, image in enumerate(list(row_images)):
            canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h))
    canvas.save(png)
#----------------------------------------------------------------------------
# Figure 4: Noise detail.
def draw_noise_detail_figure(png, Gs, w, h, num_samples, seeds):
"""Figure 4: effect of noise realizations on fine detail.

For each seed, renders the same latent num_samples times (noise varies),
showing one full image, four zoomed crops, and a per-pixel std-dev map.
"""
print(png)
canvas = PIL.Image.new('RGB', (w * 3, h * len(seeds)), 'white')
for row, seed in enumerate(seeds):
# Same latent repeated; only the per-layer noise differs between runs.
latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] * num_samples)
images = Gs.run(latents, None, truncation_psi=1, **synthesis_kwargs)
canvas.paste(PIL.Image.fromarray(images[0], 'RGB'), (0, row * h))
for i in range(4):
crop = PIL.Image.fromarray(images[i + 1], 'RGB')
crop = crop.crop((650, 180, 906, 436))
crop = crop.resize((w//2, h//2), PIL.Image.NEAREST)
canvas.paste(crop, (w + (i%2) * w//2, row * h + (i//2) * h//2))
# Std-dev across samples of the per-pixel channel mean, scaled for visibility.
diff = np.std(np.mean(images, axis=3), axis=0) * 4
diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8)
canvas.paste(PIL.Image.fromarray(diff, 'L'), (w * 2, row * h))
canvas.save(png)
#----------------------------------------------------------------------------
# Figure 5: Noise components.
def draw_noise_components_figure(png, Gs, w, h, seeds, noise_ranges, flips):
    """Figure 5: enabling different subsets of the per-layer noise inputs."""
    print(png)
    # Clone so noise variables can be zeroed without touching the shared network.
    Gsc = Gs.clone()
    noise_vars = [var for name, var in Gsc.components.synthesis.vars.items() if name.startswith('noise')]
    noise_pairs = list(zip(noise_vars, tflib.run(noise_vars))) # [(var, val), ...]
    # Fixed: np.stack requires a sequence of arrays -- passing a bare generator
    # is deprecated (and a TypeError on recent NumPy), so build an explicit list.
    latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds])
    all_images = []
    for noise_range in noise_ranges:
        # Keep noise only for layer indices inside noise_range; zero the rest.
        tflib.set_vars({var: val * (1 if i in noise_range else 0) for i, (var, val) in enumerate(noise_pairs)})
        range_images = Gsc.run(latents, None, truncation_psi=1, randomize_noise=False, **synthesis_kwargs)
        # Mirror the images listed in 'flips' horizontally.
        range_images[flips, :, :] = range_images[flips, :, ::-1]
        all_images.append(list(range_images))
    # Assemble a 2x2 grid of half-image crops from the four noise settings.
    canvas = PIL.Image.new('RGB', (w * 2, h * 2), 'white')
    for col, col_images in enumerate(zip(*all_images)):
        canvas.paste(PIL.Image.fromarray(col_images[0], 'RGB').crop((0, 0, w//2, h)), (col * w, 0))
        canvas.paste(PIL.Image.fromarray(col_images[1], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, 0))
        canvas.paste(PIL.Image.fromarray(col_images[2], 'RGB').crop((0, 0, w//2, h)), (col * w, h))
        canvas.paste(PIL.Image.fromarray(col_images[3], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, h))
    canvas.save(png)
#----------------------------------------------------------------------------
# Figure 8: Truncation trick.
def draw_truncation_trick_figure(png, Gs, w, h, seeds, psis):
    """Figure 8: the same seeds rendered at several truncation strengths (psis)."""
    print(png)
    # Fixed: np.stack requires a sequence of arrays -- passing a bare generator
    # is deprecated (and a TypeError on recent NumPy), so build an explicit list.
    latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds])
    dlatents = Gs.components.mapping.run(latents, None) # [seed, layer, component]
    dlatent_avg = Gs.get_var('dlatent_avg') # [component]
    canvas = PIL.Image.new('RGB', (w * len(psis), h * len(seeds)), 'white')
    for row, dlatent in enumerate(list(dlatents)):
        # Interpolate between the average dlatent and this seed's dlatent,
        # one column per psi value.
        row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(psis, [-1, 1, 1]) + dlatent_avg
        row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs)
        for col, image in enumerate(list(row_images)):
            canvas.paste(PIL.Image.fromarray(image, 'RGB'), (col * w, row * h))
    canvas.save(png)
#----------------------------------------------------------------------------
# Main program.
def main():
    """Regenerate the StyleGAN paper figures (2-5, 8, 10-12) into config.result_dir."""
    tflib.init_tf()
    os.makedirs(config.result_dir, exist_ok=True)
    # FFHQ (faces) figures.
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5)
    draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)])
    draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012])
    draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1])
    draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1])
    # Uncurated samples from the other pretrained models (bedrooms, cars, cats).
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0)
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2)
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1)
#----------------------------------------------------------------------------
# Script entry point: render every figure when executed directly.
if __name__ == "__main__":
    main()
#----------------------------------------------------------------------------
| 9,563 | 58.037037 | 257 | py |
stylegan-encoder | stylegan-encoder-master/adaptive.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the adaptive form of the loss.
You should only use this function if 1) you want the loss to change it's shape
during training (otherwise use general.py) or 2) you want to impose the loss on
a wavelet or DCT image representation, a only this function has easy support for
that.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from robust_loss import distribution
from robust_loss import util
from robust_loss import wavelet
def _check_scale(scale_lo, scale_init):
"""Helper function for checking `scale_lo` and `scale_init`."""
if not np.isscalar(scale_lo):
raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(
type(scale_lo)))
if not np.isscalar(scale_init):
raise ValueError('`scale_init` must be a scalar, but is of type {}'.format(
type(scale_init)))
if not scale_lo > 0:
raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
if not scale_init >= scale_lo:
raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(
scale_init, scale_lo))
def _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=''):
  """Builds the per-dimension `scale` parameter of the loss on `x`.

  When `scale_lo == scale_init` the scale is a fixed (non-trainable) constant
  of shape (1, x.shape[1]); otherwise it is an affine-softplus transform of a
  trainable latent TF variable named 'LatentScale' + var_suffix.
  """
  if scale_lo != scale_init:
    # Trainable case: optimize a free latent variable, with affine_softplus()
    # mapping it into (scale_lo, infinity) such that zero maps to scale_init.
    latent_scale = tf.get_variable(
        'LatentScale' + var_suffix, initializer=tf.zeros((1, x.shape[1]), float_dtype))
    return util.affine_softplus(latent_scale, lo=scale_lo, ref=scale_init)
  # Degenerate range: pin the scale to the constant `scale_init` for every
  # dimension of `x`.
  return tf.tile(
      tf.cast(scale_init, float_dtype)[tf.newaxis, tf.newaxis],
      (1, x.shape[1]))
def lossfun(x,
            alpha_lo=0.001,
            alpha_hi=1.999,
            alpha_init=None,
            scale_lo=1e-5,
            scale_init=1.,
            var_suffix='',
            **kwargs):
  """Computes the adaptive form of the robust loss on a matrix.

  This function behaves differently from general.lossfun() and
  distribution.nllfun(), which are "stateless", allow the caller to specify the
  shape and scale of the loss, and allow for arbitrary sized inputs. This
  function only allows for rank-2 inputs for the residual `x`, and expects that
  `x` is of the form [batch_index, dimension_index]. This function then
  constructs free parameters (TF variables) that define the alpha and scale
  parameters for each dimension of `x`, such that all alphas are in
  (`alpha_lo`, `alpha_hi`) and all scales are in (`scale_lo`, Infinity).

  The assumption is that `x` is, say, a matrix where x[i,j] corresponds to a
  pixel at location j for image i, with the idea being that all pixels at
  location j should be modeled with the same shape and scale parameters across
  all images in the batch. This function also returns handles to the scale and
  shape parameters being optimized over, mostly for debugging and introspection.

  If the user wants to fix alpha or scale to be a constant, this can be done by
  setting alpha_lo=alpha_hi or scale_lo=scale_init respectively.

  Args:
    x: The residual for which the loss is being computed. Must be a rank-2
      tensor, where the innermost dimension is the batch index, and the
      outermost dimension corresponds to different "channels", where this
      function will assign each channel its own variable shape (alpha) and scale
      parameters that are constructed as TF variables and can be optimized over.
      Must be a TF tensor or numpy array of single or double precision floats.
      The precision of `x` will determine the precision of the latent variables
      used to model scale and alpha internally.
    alpha_lo: The lowest possible value for loss's alpha parameters, must be >=
      0 and a scalar. Should probably be in (0, 2).
    alpha_hi: The highest possible value for loss's alpha parameters, must be >=
      alpha_lo and a scalar. Should probably be in (0, 2).
    alpha_init: The value that the loss's alpha parameters will be initialized
      to, must be in (`alpha_lo`, `alpha_hi`), unless `alpha_lo` == `alpha_hi`
      in which case this will be ignored. Defaults to (`alpha_lo` + `alpha_hi`)
      / 2
    scale_lo: The lowest possible value for the loss's scale parameters. Must be
      > 0 and a scalar. This value may have more of an effect than you think, as
      the loss is unbounded as scale approaches zero (say, at a delta function).
    scale_init: The initial value used for the loss's scale parameters. This
      also defines the zero-point of the latent representation of scales, so SGD
      may cause optimization to gravitate towards producing scales near this
      value.
    var_suffix: A string suffix appended to the names of the TF variables
      constructed here ('LatentAlpha' + var_suffix, and the latent scale
      variable inside _construct_scale()), so that multiple instances of this
      loss can coexist in the same graph without name collisions.
    **kwargs: Arguments to be passed to the underlying distribution.nllfun().

  Returns:
    A tuple of the form (`loss`, `alpha`, `scale`).

    `loss`: a TF tensor of the same type and shape as input `x`, containing
    the loss at each element of `x` as a function of `x`, `alpha`, and
    `scale`. These "losses" are actually negative log-likelihoods (as produced
    by distribution.nllfun()) and so they are not actually bounded from below
    by zero. You'll probably want to minimize their sum or mean.

    `scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
    construct a scale variable for each dimension of `x` but not for each
    batch element. This contains the current estimated scale parameter for
    each dimension, and will change during optimization.

    `alpha`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
    construct an alpha variable for each dimension of `x` but not for each
    batch element. This contains the current estimated alpha parameter for
    each dimension, and will change during optimization.

  Raises:
    ValueError: If any of the arguments are invalid.
  """
  _check_scale(scale_lo, scale_init)
  # Validate the alpha arguments before building any graph state.
  if not np.isscalar(alpha_lo):
    raise ValueError('`alpha_lo` must be a scalar, but is of type {}'.format(
        type(alpha_lo)))
  if not np.isscalar(alpha_hi):
    raise ValueError('`alpha_hi` must be a scalar, but is of type {}'.format(
        type(alpha_hi)))
  if alpha_init is not None and not np.isscalar(alpha_init):
    raise ValueError(
        '`alpha_init` must be None or a scalar, but is of type {}'.format(
            type(alpha_init)))
  if not alpha_lo >= 0:
    raise ValueError('`alpha_lo` must be >= 0, but is {}'.format(alpha_lo))
  if not alpha_hi >= alpha_lo:
    raise ValueError('`alpha_hi` = {} must be >= `alpha_lo` = {}'.format(
        alpha_hi, alpha_lo))
  if alpha_init is not None and alpha_lo != alpha_hi:
    if not (alpha_init > alpha_lo and alpha_init < alpha_hi):
      raise ValueError(
          '`alpha_init` = {} must be in (`alpha_lo`, `alpha_hi`) = ({} {})'
          .format(alpha_init, alpha_lo, alpha_hi))
  # The latent variables inherit the float precision of `x`.
  float_dtype = x.dtype
  # Enforce at runtime that `x` is rank-2 ([batch, dimension]).
  assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
  with tf.control_dependencies(assert_ops):
    if alpha_lo == alpha_hi:
      # If the range of alphas is a single item, then we just fix `alpha` to be
      # a constant.
      alpha = tf.tile(
          tf.cast(alpha_lo, float_dtype)[tf.newaxis, tf.newaxis],
          (1, x.shape[1]))
    else:
      # Otherwise we construct a "latent" alpha variable and define `alpha`
      # As an affine function of a sigmoid on that latent variable, initialized
      # such that `alpha` starts off as `alpha_init`.
      if alpha_init is None:
        alpha_init = (alpha_lo + alpha_hi) / 2.
      latent_alpha_init = util.inv_affine_sigmoid(
          alpha_init, lo=alpha_lo, hi=alpha_hi)
      latent_alpha = tf.get_variable(
          'LatentAlpha' + var_suffix,
          initializer=tf.fill((1, x.shape[1]),
                              tf.cast(latent_alpha_init, dtype=float_dtype)))
      alpha = util.affine_sigmoid(latent_alpha, lo=alpha_lo, hi=alpha_hi)
    scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
    loss = distribution.nllfun(x, alpha, scale, **kwargs)
    return loss, alpha, scale
def lossfun_students(x, scale_lo=1e-5, scale_init=1., var_suffix=''):
  """A variant of lossfun() that uses the NLL of a Student's t-distribution.

  Args:
    x: The residual for which the loss is being computed. Must be a rank-2
      tensor, where the innermost dimension is the batch index, and the
      outermost dimension corresponds to different "channels", where this
      function will assign each channel its own variable shape (log-df) and
      scale parameters that are constructed as TF variables and can be optimized
      over. Must be a TF tensor or numpy array of single or double precision
      floats. The precision of `x` will determine the precision of the latent
      variables used to model scale and log-df internally.
    scale_lo: The lowest possible value for the loss's scale parameters. Must be
      > 0 and a scalar. This value may have more of an effect than you think, as
      the loss is unbounded as scale approaches zero (say, at a delta function).
    scale_init: The initial value used for the loss's scale parameters. This
      also defines the zero-point of the latent representation of scales, so SGD
      may cause optimization to gravitate towards producing scales near this
      value.
    var_suffix: A string suffix appended to the names of the TF variables
      constructed here ('LogDf' + var_suffix, and the latent scale variable
      inside _construct_scale()), so that multiple instances of this loss can
      coexist in the same graph without name collisions.

  Returns:
    A tuple of the form (`loss`, `log_df`, `scale`).

    `loss`: a TF tensor of the same type and shape as input `x`, containing
    the loss at each element of `x` as a function of `x`, `log_df`, and
    `scale`. These "losses" are actually negative log-likelihoods (as produced
    by distribution.nllfun()) and so they are not actually bounded from below
    by zero. You'll probably want to minimize their sum or mean.

    `scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
    construct a scale variable for each dimension of `x` but not for each
    batch element. This contains the current estimated scale parameter for
    each dimension, and will change during optimization.

    `log_df`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
    construct an log-DF variable for each dimension of `x` but not for each
    batch element. This contains the current estimated log(degrees-of-freedom)
    parameter for each dimension, and will change during optimization.

  Raises:
    ValueError: If any of the arguments are invalid.
  """
  _check_scale(scale_lo, scale_init)
  float_dtype = x.dtype
  # Enforce at runtime that `x` is rank-2 ([batch, dimension]).
  assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
  with tf.control_dependencies(assert_ops):
    # Bug fix: append `var_suffix` here, consistent with the 'LatentScale' and
    # 'LatentAlpha' variables elsewhere in this module. Previously the name was
    # a bare 'LogDf', which collides if this loss is instantiated more than
    # once in the same variable scope with different suffixes.
    log_df = tf.get_variable(
        name='LogDf' + var_suffix,
        initializer=tf.zeros((1, x.shape[1]), float_dtype))
    scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
    loss = util.students_t_nll(x, tf.math.exp(log_df), scale)
  return loss, log_df, scale
def image_lossfun(x,
                  color_space='YUV',
                  representation='CDF9/7',
                  wavelet_num_levels=5,
                  wavelet_scale_base=1,
                  use_students_t=False,
                  summarize_loss=True,
                  **kwargs):
  """Computes the adaptive form of the robust loss on a set of images.

  This function is a wrapper around lossfun() above. Like lossfun(), this
  function is not "stateless" --- it requires inputs of a specific shape and
  size, and constructs TF variables describing each non-batch dimension in `x`.
  `x` is expected to be the difference between sets of RGB images, and the other
  arguments to this function allow for the color space and spatial
  representation of `x` to be changed before the loss is imposed. By default,
  this function uses a CDF9/7 wavelet decomposition in a YUV color space, which
  often works well. This function also returns handles to the scale and
  shape parameters (both in the shape of images) being optimized over,
  and summarizes both parameters in TensorBoard.

  Args:
    x: A set of image residuals for which the loss is being computed. Must be a
      rank-4 tensor of size (num_batches, width, height, color_channels). This
      is assumed to be a set of differences between RGB images.
    color_space: The color space that `x` will be transformed into before
      computing the loss. Must be 'RGB' (in which case no transformation is
      applied) or 'YUV' (in which case we actually use a volume-preserving
      scaled YUV colorspace so that log-likelihoods still have meaning, see
      util.rgb_to_syuv()). Note that changing this argument does not change the
      assumption that `x` is the set of differences between RGB images, it just
      changes what color space `x` is converted to from RGB when computing the
      loss.
    representation: The spatial image representation that `x` will be
      transformed into after converting the color space and before computing the
      loss. If this is a valid type of wavelet according to
      wavelet.generate_filters() then that is what will be used, but we also
      support setting this to 'DCT' which applies a 2D DCT to the images, and to
      'PIXEL' which applies no transformation to the image, thereby causing the
      loss to be imposed directly on pixels.
    wavelet_num_levels: If `representation` is a kind of wavelet, this is the
      number of levels used when constructing wavelet representations. Otherwise
      this is ignored. Should probably be set to as large as possible a value
      that is supported by the input resolution, such as that produced by
      wavelet.get_max_num_levels().
    wavelet_scale_base: If `representation` is a kind of wavelet, this is the
      base of the scaling used when constructing wavelet representations.
      Otherwise this is ignored. For image_lossfun() to be volume preserving (a
      useful property when evaluating generative models) this value must be ==
      1. If the goal of this loss isn't proper statistical modeling, then
      modifying this value (say, setting it to 0.5 or 2) may significantly
      improve performance.
    use_students_t: If true, use the NLL of Student's T-distribution instead
      of the adaptive loss. This causes all `alpha_*` inputs to be ignored.
    summarize_loss: Whether or not to make TF summaries describing the latent
      state of the loss function. True by default.
    **kwargs: Arguments to be passed to the underlying lossfun() (or
      lossfun_students() when `use_students_t` is set).

  Returns:
    A tuple of the form (`loss`, `alpha`, `scale`). If use_students_t == True,
    then `log(df)` is returned instead of `alpha`.

    `loss`: a TF tensor of the same type and shape as input `x`, containing
    the loss at each element of `x` as a function of `x`, `alpha`, and
    `scale`. These "losses" are actually negative log-likelihoods (as produced
    by distribution.nllfun()) and so they are not actually bounded from below
    by zero. You'll probably want to minimize their sum or mean.

    `scale`: a TF tensor of the same type as x, of size
    (width, height, color_channels),
    as we construct a scale variable for each spatial and color dimension of `x`
    but not for each batch element. This contains the current estimated scale
    parameter for each dimension, and will change during optimization.

    `alpha`: a TF tensor of the same type as x, of size
    (width, height, color_channels),
    as we construct an alpha variable for each spatial and color dimension of
    `x` but not for each batch element. This contains the current estimated
    alpha parameter for each dimension, and will change during optimization.

  Raises:
    ValueError: if `color_space` of `representation` are unsupported color
      spaces or image representations, respectively.
  """
  color_spaces = ['RGB', 'YUV']
  if color_space not in color_spaces:
    raise ValueError('`color_space` must be in {}, but is {!r}'.format(
        color_spaces, color_space))
  representations = wavelet.generate_filters() + ['DCT', 'PIXEL']
  if representation not in representations:
    raise ValueError('`representation` must be in {}, but is {!r}'.format(
        representations, representation))
  # Enforce at runtime that `x` is rank-4 ([batch, width, height, channels]).
  assert_ops = [tf.Assert(tf.equal(tf.rank(x), 4), [tf.rank(x)])]
  with tf.control_dependencies(assert_ops):
    if color_space == 'YUV':
      x = util.rgb_to_syuv(x)
    # If `color_space` == 'RGB', do nothing.

    # Reshape `x` from
    #   (num_batches, width, height, num_channels) to
    #   (num_batches * num_channels, width, height)
    _, width, height, num_channels = x.shape.as_list()
    x_stack = tf.reshape(tf.transpose(x, (0, 3, 1, 2)), (-1, width, height))

    # Turn each channel in `x_stack` into the spatial representation specified
    # by `representation`.
    if representation in wavelet.generate_filters():
      x_stack = wavelet.flatten(
          wavelet.rescale(
              wavelet.construct(x_stack, wavelet_num_levels, representation),
              wavelet_scale_base))
    elif representation == 'DCT':
      x_stack = util.image_dct(x_stack)
    # If `representation` == 'PIXEL', do nothing.

    # Reshape `x_stack` from
    #   (num_batches * num_channels, width, height) to
    #   (num_batches, num_channels * width * height)
    x_mat = tf.reshape(
        tf.transpose(
            tf.reshape(x_stack, [-1, num_channels, width, height]),
            [0, 2, 3, 1]), [-1, width * height * num_channels])

    # Set up the adaptive loss. Note, if `use_students_t` == True then
    # `alpha_mat` actually contains "log(df)" values.
    if use_students_t:
      loss_mat, alpha_mat, scale_mat = lossfun_students(x_mat, **kwargs)
    else:
      loss_mat, alpha_mat, scale_mat = lossfun(x_mat, **kwargs)

    # Reshape the loss function's outputs to have the shapes as the input.
    loss = tf.reshape(loss_mat, [-1, width, height, num_channels])
    alpha = tf.reshape(alpha_mat, [width, height, num_channels])
    scale = tf.reshape(scale_mat, [width, height, num_channels])

    if summarize_loss:
      # Summarize the `alpha` and `scale` parameters as images (normalized to
      # [0, 1]) and histograms.
      # Note that these may look unintuitive unless the colorspace is 'RGB' and
      # the image representation is 'PIXEL', as the image summaries (like most
      # images) are rendered as RGB pixels.
      alpha_min = tf.reduce_min(alpha)
      alpha_max = tf.reduce_max(alpha)
      tf.summary.image(
          'robust/alpha',
          (alpha[tf.newaxis] - alpha_min) / (alpha_max - alpha_min + 1e-10))
      tf.summary.histogram('robust/alpha', alpha)
      log_scale = tf.math.log(scale)
      log_scale_min = tf.reduce_min(log_scale)
      log_scale_max = tf.reduce_max(log_scale)
      tf.summary.image('robust/log_scale',
                       (log_scale[tf.newaxis] - log_scale_min) /
                       (log_scale_max - log_scale_min + 1e-10))
      tf.summary.histogram('robust/log_scale', log_scale)

  return loss, alpha, scale
| 19,790 | 48.4775 | 89 | py |
stylegan-encoder | stylegan-encoder-master/train_resnet.py | """
Trains a modified Resnet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import pickle
import cv2
import argparse
import dnnlib
import config
import dnnlib.tflib as tflib
import tensorflow
import keras
import keras.backend as K
from keras.applications.resnet50 import preprocess_input
from keras.layers import Input, LocallyConnected1D, Reshape, Permute, Conv2D, Add
from keras.models import Model, load_model
def generate_dataset_main(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7):
    """
    Generates a dataset of 'n' images of shape ('size', 'size', 3) with random seed 'seed'
    along with their dlatent vectors W of shape ('n', 512)

    These datasets can serve to train an inverse mapping from X to W as well as explore the latent space

    More variation added to latents; also, negative truncation added to balance these examples.
    """
    n = n // 2 # this gets doubled because of negative truncation below
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    Gs = load_Gs()
    # mod_l * mod_r must equal model_scale so the dlatent tensor can later be
    # reshaped to (n*2, model_scale, 512).
    if (model_scale % 3 == 0):
        mod_l = 3
    else:
        mod_l = 2
    # Coin flip `b` decides between two latent-mixing granularities below.
    if seed is not None:
        b = bool(np.random.RandomState(seed).randint(2))
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        b = bool(np.random.randint(2))
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    if b:
        mod_l = model_scale // 2
    mod_r = model_scale // mod_l
    # Regenerate Z so its row count matches the (possibly updated) mod_l.
    if seed is not None:
        Z = np.random.RandomState(seed).randn(n*mod_l, Gs.input_shape[1])
    else:
        Z = np.random.randn(n*mod_l, Gs.input_shape[1])
    W = Gs.components.mapping.run(Z, None, minibatch_size=minibatch_size) # Use mapping network to get unique dlatents for more variation.
    dlatent_avg = Gs.get_var('dlatent_avg') # [component]
    W = (W[np.newaxis] - dlatent_avg) * np.reshape([truncation, -truncation], [-1, 1, 1, 1]) + dlatent_avg # truncation trick and add negative image pair
    W = np.append(W[0], W[1], axis=0)
    W = W[:, :mod_r]
    W = W.reshape((n*2, model_scale, 512))
    X = Gs.components.synthesis.run(W, randomize_noise=False, minibatch_size=minibatch_size, print_progress=True,
                                    output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
    # Downscale rendered images to the ResNet's input resolution.
    X = np.array([cv2.resize(x, (image_size, image_size), interpolation = cv2.INTER_AREA) for x in X])
    #X = preprocess_input(X, backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
    X = preprocess_input(X)
    return W, X
def generate_dataset(n=10000, save_path=None, seed=None, model_res=1024, image_size=256, minibatch_size=16, truncation=0.7):
    """
    Use generate_dataset_main() as a helper function.
    Divides requests into batches to save memory.
    """
    batch_size = 16
    inc = n//batch_size
    left = n-((batch_size-1)*inc)
    # First chunk establishes the output arrays; the remaining chunks are
    # (batch_size - 2) more of size `inc`, then a final chunk of size `left`,
    # for batch_size chunks totalling n examples.
    W, X = generate_dataset_main(inc, save_path, seed, model_res, image_size, minibatch_size, truncation)
    remaining_chunks = [inc] * (batch_size - 2) + [left]
    for chunk_n in remaining_chunks:
        aW, aX = generate_dataset_main(chunk_n, save_path, seed, model_res, image_size, minibatch_size, truncation)
        W = np.append(W, aW, axis=0)
        aW = None  # release before the next large append
        X = np.append(X, aX, axis=0)
        aX = None
    if save_path is not None:
        prefix = '_{}_{}'.format(seed, n)
        np.save(os.path.join(os.path.join(save_path, 'W' + prefix)), W)
        np.save(os.path.join(os.path.join(save_path, 'X' + prefix)), X)
    return W, X
def is_square(n):
    """Return True when `n` is a perfect square (rounded float sqrt check)."""
    root = int(math.sqrt(n) + 0.5)
    return root * root == n
def get_resnet_model(save_path, model_res=1024, image_size=256, depth=2, size=0, activation='elu', loss='logcosh', optimizer='adam'):
    """Load the image->dlatent ResNet from `save_path` if it exists, otherwise
    build a fresh one: a (V2) ResNet backbone followed by `depth` TreeConnect
    blocks, outputting (model_scale, 512) dlatents."""
    # Build model
    if os.path.exists(save_path):
        print('Loading model')
        return load_model(save_path)
    print('Building model')
    model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
    # `size` selects the backbone: 0 -> ResNet50, 1/2/3 -> ResNet50V2/101V2/152V2.
    if size <= 0:
        from keras.applications.resnet50 import ResNet50
        resnet = ResNet50(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3))
    else:
        from keras_applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
        if size == 1:
            resnet = ResNet50V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
        if size == 2:
            resnet = ResNet101V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
        if size >= 3:
            resnet = ResNet152V2(include_top=False, pooling=None, weights='imagenet', input_shape=(image_size, image_size, 3), backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
    # Factor layer_size into a (layer_r, layer_l) 2D shape for the
    # TreeConnect (locally-connected) layers below.
    layer_size = model_scale*8*8*8
    if is_square(layer_size): # work out layer dimensions
        layer_l = int(math.sqrt(layer_size)+0.5)
        layer_r = layer_l
    else:
        layer_m = math.log(math.sqrt(layer_size),2)
        layer_l = 2**math.ceil(layer_m)
        layer_r = layer_size // layer_l
        layer_l = int(layer_l)
        layer_r = int(layer_r)
    x_init = None
    inp = Input(shape=(image_size, image_size, 3))
    x = resnet(inp)
    if (depth < 0):
        depth = 1
    # Project the backbone features down to the 2D shape expected by the
    # TreeConnect blocks; the projection width depends on the backbone size.
    if (size <= 1):
        if (size <= 0):
            x = Conv2D(model_scale*8, 1, activation=activation)(x) # scale down
            x = Reshape((layer_r, layer_l))(x)
        else:
            x = Conv2D(model_scale*8*4, 1, activation=activation)(x) # scale down a little
            x = Reshape((layer_r*2, layer_l*2))(x)
    else:
        if (size == 2):
            x = Conv2D(1024, 1, activation=activation)(x) # scale down a bit
            x = Reshape((256, 256))(x)
        else:
            x = Reshape((256, 512))(x) # all weights used
    while (depth > 0): # See https://github.com/OliverRichter/TreeConnect/blob/master/cifar.py - TreeConnect inspired layers instead of dense layers.
        x = LocallyConnected1D(layer_r, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        x = LocallyConnected1D(layer_l, 1, activation=activation)(x)
        x = Permute((2, 1))(x)
        if x_init is not None:
            x = Add()([x, x_init]) # add skip connection
        x_init = x
        depth-=1
    x = Reshape((model_scale, 512))(x) # train against all dlatent values
    model = Model(inputs=inp,outputs=x)
    model.compile(loss=loss, metrics=[], optimizer=optimizer) # By default: adam optimizer, logcosh used for loss.
    return model
def finetune_resnet(model, save_path, model_res=1024, image_size=256, batch_size=10000, test_size=1000, n_epochs=10, max_patience=5, seed=0, minibatch_size=32, truncation=0.7):
    """
    Finetunes a resnet to predict W from X
    Generate batches (X, W) of size 'batch_size', iterates 'n_epochs', and repeat while 'max_patience' is reached
    on the test set. The model is saved every time a new best test loss is reached.
    """
    assert image_size >= 224

    # Create a test set
    print('Creating test set:')
    np.random.seed(seed)
    W_test, X_test = generate_dataset(n=test_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation)

    # Iterate on batches of size batch_size
    print('Generating training set:')
    patience = 0
    best_loss = np.inf
    #loss = model.evaluate(X_test, W_test)
    #print('Initial test loss : {:.5f}'.format(loss))
    while (patience <= max_patience):
        # Fresh training batch each round; drop the old one first.
        W_train = X_train = None
        W_train, X_train = generate_dataset(batch_size, model_res=model_res, image_size=image_size, seed=seed, minibatch_size=minibatch_size, truncation=truncation)
        model.fit(X_train, W_train, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        loss = model.evaluate(X_test, W_test, batch_size=minibatch_size)
        if loss < best_loss:
            print('New best test loss : {:.5f}'.format(loss))
            patience = 0
            best_loss = loss
        else:
            print('Test loss : {:.5f}'.format(loss))
            patience += 1
        if (patience > max_patience): # When done with test set, train with it and discard.
            print('Done with current test set.')
            model.fit(X_test, W_test, epochs=n_epochs, verbose=True, batch_size=minibatch_size)
        print('Saving model.')
        model.save(save_path)
# ---- Command-line interface ----
parser = argparse.ArgumentParser(description='Train a ResNet to predict latent representations of images in a StyleGAN model from generated examples', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL')
parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
parser.add_argument('--data_dir', default='data', help='Directory for storing the ResNet model')
parser.add_argument('--model_path', default='data/finetuned_resnet.h5', help='Save / load / create the ResNet model with this file path')
parser.add_argument('--model_depth', default=1, help='Number of TreeConnect layers to add after ResNet', type=int)
parser.add_argument('--model_size', default=1, help='Model size - 0 - small, 1 - medium, 2 - large, 3 - full.', type=int)
parser.add_argument('--activation', default='elu', help='Activation function to use after ResNet')
parser.add_argument('--optimizer', default='adam', help='Optimizer to use')
parser.add_argument('--loss', default='logcosh', help='Loss function to use')
parser.add_argument('--use_fp16', default=False, help='Use 16-bit floating point', type=bool)
parser.add_argument('--image_size', default=256, help='Size of images for ResNet model', type=int)
parser.add_argument('--batch_size', default=2048, help='Batch size for training the ResNet model', type=int)
parser.add_argument('--test_size', default=512, help='Batch size for testing the ResNet model', type=int)
parser.add_argument('--truncation', default=0.7, help='Generate images using truncation trick', type=float)
parser.add_argument('--max_patience', default=2, help='Number of iterations to wait while test loss does not improve', type=int)
parser.add_argument('--freeze_first', default=False, help='Start training with the pre-trained network frozen, then unfreeze', type=bool)
parser.add_argument('--epochs', default=2, help='Number of training epochs to run for each batch', type=int)
parser.add_argument('--minibatch_size', default=16, help='Size of minibatches for training and generation', type=int)
parser.add_argument('--seed', default=-1, help='Pick a random seed for reproducibility (-1 for no random seed selected)', type=int)
parser.add_argument('--loop', default=-1, help='Run this many iterations (-1 for infinite, halt with CTRL-C)', type=int)

args, other_args = parser.parse_known_args()

os.makedirs(args.data_dir, exist_ok=True)

# Sentinel -1 means "no fixed seed".
if args.seed == -1:
    args.seed = None

if args.use_fp16:
    K.set_floatx('float16')
    K.set_epsilon(1e-4)

# ---- Model and generator setup ----
tflib.init_tf()
model = get_resnet_model(args.model_path, model_res=args.model_res, depth=args.model_depth, size=args.model_size, activation=args.activation, optimizer=args.optimizer, loss=args.loss)

# Download/unpickle the pretrained StyleGAN; only Gs_network is used below.
with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)

def load_Gs():
    # Accessor used by generate_dataset_main() to fetch the generator.
    return Gs_network

if args.freeze_first:
    # Freeze the pretrained ResNet backbone (layer index 1) for the first round.
    model.layers[1].trainable = False
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)

model.summary()

if args.freeze_first: # run a training iteration first while pretrained model is frozen, then unfreeze.
    finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
    model.layers[1].trainable = True
    model.compile(loss=args.loss, metrics=[], optimizer=args.optimizer)
    model.summary()

# ---- Main training loop: bounded iterations if --loop >= 0, else forever ----
if args.loop < 0:
    while True:
        finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
else:
    count = args.loop
    while count > 0:
        finetune_resnet(model, args.model_path, model_res=args.model_res, image_size=args.image_size, batch_size=args.batch_size, test_size=args.test_size, max_patience=args.max_patience, n_epochs=args.epochs, seed=args.seed, minibatch_size=args.minibatch_size, truncation=args.truncation)
        count -= 1
| 13,439 | 49.337079 | 289 | py |
stylegan-encoder | stylegan-encoder-master/config.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
#----------------------------------------------------------------------------
# Paths.
result_dir = 'results'
data_dir = 'datasets'
cache_dir = 'cache'
run_dir_ignore = ['results', 'datasets', 'cache']
# experimental - replace Dense layers with TreeConnect
use_treeconnect = False
treeconnect_threshold = 1024
#----------------------------------------------------------------------------
| 762 | 32.173913 | 77 | py |
stylegan-encoder | stylegan-encoder-master/run_metrics.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main entry point for training StyleGAN and ProGAN networks."""
import dnnlib
from dnnlib import EasyDict
import dnnlib.tflib as tflib
import config
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
def run_pickle(submit_config, metric_args, network_pkl, dataset_args, mirror_augment):
    """Evaluate one metric on a network pickle identified by URL or path."""
    run_ctx = dnnlib.RunContext(submit_config)
    tflib.init_tf()
    print('Evaluating %s metric on network_pkl "%s"...' % (metric_args.name, network_pkl))
    # Instantiate the metric object from its dotted function name plus kwargs.
    metric_obj = dnnlib.util.call_func_by_name(**metric_args)
    print()
    metric_obj.run(
        network_pkl,
        dataset_args=dataset_args,
        mirror_augment=mirror_augment,
        num_gpus=submit_config.num_gpus,
    )
    print()
    run_ctx.close()
#----------------------------------------------------------------------------
def run_snapshot(submit_config, metric_args, run_id, snapshot):
    """Evaluate one metric on a single snapshot of a previous training run."""
    run_ctx = dnnlib.RunContext(submit_config)
    tflib.init_tf()
    print('Evaluating %s metric on run_id %s, snapshot %s...' % (metric_args.name, run_id, snapshot))
    # Resolve the run directory and the requested snapshot pickle inside it.
    run_dir = misc.locate_run_dir(run_id)
    network_pkl = misc.locate_network_pkl(run_dir, snapshot)
    metric_obj = dnnlib.util.call_func_by_name(**metric_args)
    print()
    metric_obj.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
    print()
    run_ctx.close()
#----------------------------------------------------------------------------
def run_all_snapshots(submit_config, metric_args, run_id):
    """Evaluate one metric on every snapshot pickle of a previous training run."""
    run_ctx = dnnlib.RunContext(submit_config)
    tflib.init_tf()
    print('Evaluating %s metric on all snapshots of run_id %s...' % (metric_args.name, run_id))
    run_dir = misc.locate_run_dir(run_id)
    snapshot_pkls = misc.list_network_pkls(run_dir)
    metric_obj = dnnlib.util.call_func_by_name(**metric_args)
    print()
    for snapshot_idx, snapshot_pkl in enumerate(snapshot_pkls):
        # Report progress through the run context so the submit UI can track it.
        run_ctx.update('', snapshot_idx, len(snapshot_pkls))
        metric_obj.run(snapshot_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus)
    print()
    run_ctx.close()
#----------------------------------------------------------------------------
def main():
    """Build the metric/network task grid and submit one evaluation run per pair.

    Each commented-out line below is a deliberately preserved alternative
    configuration option from the upstream codebase.
    """
    submit_config = dnnlib.SubmitConfig()
    # Which metrics to evaluate?
    metrics = []
    metrics += [metric_base.fid50k]
    #metrics += [metric_base.ppl_zfull]
    #metrics += [metric_base.ppl_wfull]
    #metrics += [metric_base.ppl_zend]
    #metrics += [metric_base.ppl_wend]
    #metrics += [metric_base.ls]
    #metrics += [metric_base.dummy]
    # Which networks to evaluate them on?
    tasks = []
    tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)] # karras2019stylegan-ffhq-1024x1024.pkl
    #tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)]
    #tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)]
    # How many GPUs to use?
    submit_config.num_gpus = 1
    #submit_config.num_gpus = 2
    #submit_config.num_gpus = 4
    #submit_config.num_gpus = 8
    # Execute.
    submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
    submit_config.run_dir_ignore += config.run_dir_ignore
    for task in tasks:
        for metric in metrics:
            # Run description encodes task name, metric name, snapshot info, and GPU count.
            submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name)
            if task.run_func_name.endswith('run_snapshot'):
                submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot)
            if task.run_func_name.endswith('run_all_snapshots'):
                submit_config.run_desc += '-%s' % task.run_id
            submit_config.run_desc += '-%dgpu' % submit_config.num_gpus
            dnnlib.submit_run(submit_config, metric_args=metric, **task)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 4,374 | 40.273585 | 262 | py |
stylegan-encoder | stylegan-encoder-master/align_images.py | import os
import sys
import bz2
import argparse
from keras.utils import get_file
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector
import multiprocessing
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
def unpack_bz2(src_path):
    """Decompress a ``.bz2`` archive next to itself and return the new path.

    The destination path is ``src_path`` with its last four characters
    (the ``.bz2`` suffix) removed.
    """
    payload = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as out_file:
        out_file.write(payload)
    return dst_path
if __name__ == "__main__":
"""
Extracts and aligns all faces from images using DLib and a function from original FFHQ dataset preparation step
python align_images.py /raw_images /aligned_images
"""
parser = argparse.ArgumentParser(description='Align faces from input images', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('raw_dir', help='Directory with raw images for face alignment')
parser.add_argument('aligned_dir', help='Directory for storing aligned images')
parser.add_argument('--output_size', default=1024, help='The dimension of images for input to the model', type=int)
parser.add_argument('--x_scale', default=1, help='Scaling factor for x dimension', type=float)
parser.add_argument('--y_scale', default=1, help='Scaling factor for y dimension', type=float)
parser.add_argument('--em_scale', default=0.1, help='Scaling factor for eye-mouth distance', type=float)
parser.add_argument('--use_alpha', default=False, help='Add an alpha channel for masking', type=bool)
args, other_args = parser.parse_known_args()
landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
LANDMARKS_MODEL_URL, cache_subdir='temp'))
RAW_IMAGES_DIR = args.raw_dir
ALIGNED_IMAGES_DIR = args.aligned_dir
landmarks_detector = LandmarksDetector(landmarks_model_path)
for img_name in os.listdir(RAW_IMAGES_DIR):
print('Aligning %s ...' % img_name)
try:
raw_img_path = os.path.join(RAW_IMAGES_DIR, img_name)
fn = face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], 1)
if os.path.isfile(fn):
continue
print('Getting landmarks...')
for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
try:
print('Starting face alignment...')
face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
aligned_face_path = os.path.join(ALIGNED_IMAGES_DIR, face_img_name)
image_align(raw_img_path, aligned_face_path, face_landmarks, output_size=args.output_size, x_scale=args.x_scale, y_scale=args.y_scale, em_scale=args.em_scale, alpha=args.use_alpha)
print('Wrote result %s' % aligned_face_path)
except:
print("Exception in face alignment!")
except:
print("Exception in landmark detection!")
| 3,050 | 48.209677 | 200 | py |
stylegan-encoder | stylegan-encoder-master/swa.py | """
Stochastic Weight Averaging: https://arxiv.org/abs/1803.05407
See: https://github.com/kristpapadopoulos/keras-stochastic-weight-averaging
"""
import os
import glob
import pickle
import argparse
from dnnlib.tflib import init_tf
filepath = 'output.pkl'
def fetch_models_from_files(model_list):
    """Lazily yield unpickled checkpoint contents, one per file in `model_list`.

    Each pickle is expected to hold a (G, D, Gs) network tuple as written by
    the training loop.
    """
    for model_path in model_list:
        with open(model_path, 'rb') as handle:
            yield pickle.load(handle)
def apply_swa_to_checkpoints(models):
    """Fold an iterator of (G, D, Gs) checkpoints into a running weight average.

    The first tuple yielded by `models` seeds the running average; every later
    tuple is merged in via each network's `apply_swa(other, epoch)` with an
    increasing epoch counter (1, 2, ...). Iteration stops when the iterator is
    exhausted or a tuple with `gs is None` is seen.

    Returns the (gen, dis, gs) triple holding the averaged weights.
    """
    mod_gen, mod_dis, mod_gs = next(models)
    print('Loading', end='', flush=True)
    epoch = 0
    try:
        while True:
            epoch += 1
            gen, dis, gs = next(models)
            if gs is None:
                print("")
                break
            mod_gen.apply_swa(gen, epoch)
            mod_dis.apply_swa(dis, epoch)
            mod_gs.apply_swa(gs, epoch)
            print('.', end='', flush=True)
    except StopIteration:
        # Fix: this was a bare `except:` that also swallowed real errors
        # raised inside apply_swa(). Only the iterator running dry should
        # terminate the averaging loop.
        print("")
    return (mod_gen, mod_dis, mod_gs)
# Command-line driver: average the last N checkpoints in a results directory
# and write the averaged (G, D, Gs) triple to a single pickle.
parser = argparse.ArgumentParser(description='Perform stochastic weight averaging', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('results_dir', help='Directory with network checkpoints for weight averaging')
parser.add_argument('--filespec', default='network*.pkl', help='The files to average')
parser.add_argument('--output_model', default='network_avg.pkl', help='The averaged model to output')
parser.add_argument('--count', default=6, help='Average the last n checkpoints', type=int)
args, other_args = parser.parse_known_args()
swa_epochs = args.count
filepath = args.output_model
files = glob.glob(os.path.join(args.results_dir, args.filespec))
# Fix: sort BEFORE slicing. glob() returns files in arbitrary OS order, so
# taking the tail first selected an arbitrary subset rather than the most
# recent checkpoints. Sorting first makes "last n checkpoints" actually hold.
files.sort()
if len(files) > swa_epochs:
    files = files[-swa_epochs:]
print(files)
init_tf()
models = fetch_models_from_files(files)
swa_models = apply_swa_to_checkpoints(models)
print('Final model parameters set to stochastic weight average.')
with open(filepath, 'wb') as f:
    pickle.dump(swa_models, f)
print('Final stochastic averaged weights saved to file.')
| 2,025 | 31.15873 | 139 | py |
stylegan-encoder | stylegan-encoder-master/dataset_tool.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN."""
# pylint: disable=too-many-lines
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue # pylint: disable=import-error
import traceback
import numpy as np
import tensorflow as tf
import PIL.Image
import dnnlib.tflib as tflib
from training import dataset
#----------------------------------------------------------------------------
def error(msg):
    """Print an error message and terminate the process with exit status 1."""
    print('Error: ' + msg)
    # sys.exit raises SystemExit explicitly; the builtin exit() is a
    # site-module convenience intended for interactive sessions and is not
    # guaranteed to exist when run with `python -S` or in frozen builds.
    sys.exit(1)
#----------------------------------------------------------------------------
class TFRecordExporter:
    """Writes a multi-resolution TFRecords dataset: one .tfrecords file per
    level of detail (LOD), each image stored at every power-of-two downscale.

    Images must be added in shuffled order; use choose_shuffled_order() for a
    deterministic permutation (seeded RandomState(123)).
    """
    def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
        # tfr_prefix: <dir>/<basename-of-dir>, used as the stem for all output files.
        self.tfrecord_dir = tfrecord_dir
        self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
        self.expected_images = expected_images
        self.cur_images = 0
        self.shape = None                 # (channels, height, width) of the first image added
        self.resolution_log2 = None
        self.tfr_writers = []             # one writer per LOD, created lazily on first add_image
        self.print_progress = print_progress
        self.progress_interval = progress_interval
        if self.print_progress:
            print('Creating dataset "%s"' % tfrecord_dir)
        if not os.path.isdir(self.tfrecord_dir):
            os.makedirs(self.tfrecord_dir)
        assert os.path.isdir(self.tfrecord_dir)

    def close(self):
        """Flush and close all per-LOD writers; safe to call more than once."""
        if self.print_progress:
            print('%-40s\r' % 'Flushing data...', end='', flush=True)
        for tfr_writer in self.tfr_writers:
            tfr_writer.close()
        self.tfr_writers = []
        if self.print_progress:
            print('%-40s\r' % '', end='', flush=True)
            print('Added %d images.' % self.cur_images)

    def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
        # Deterministic permutation so images and labels can be shuffled identically.
        order = np.arange(self.expected_images)
        np.random.RandomState(123).shuffle(order)
        return order

    def add_image(self, img):
        """Append one CHW uint8 image (1 or 3 channels, square power-of-two size)."""
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
        if self.shape is None:
            # The first image fixes the dataset geometry and creates one
            # writer per LOD (full resolution down to 4x4).
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.io.TFRecordOptions(compression_type=None, input_buffer_size=16777216, output_buffer_size=104857600)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                # 2x2 box filter: average four neighbors to halve the resolution.
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1

    def add_labels(self, labels):
        """Save one label row per previously added image as <prefix>-rxx.labels."""
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32))

    def add_sentence_embedding(self, embeddings):
        # save as -rxx.labels for easy compatibility with rest of codebase.
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, embeddings.astype(np.float32))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
#----------------------------------------------------------------------------
class ExceptionInfo(object):
    """Snapshot of the exception currently being handled.

    Must be instantiated inside an ``except`` block; captures the exception
    object and its formatted traceback for later re-raising/reporting.
    """
    def __init__(self):
        _, exc_value, _ = sys.exc_info()
        self.value = exc_value
        self.traceback = traceback.format_exc()
#----------------------------------------------------------------------------
class WorkerThread(threading.Thread):
    """Worker that consumes (func, args, result_queue) tuples from a shared queue.

    A task whose ``func`` is ``None`` is the shutdown sentinel. Exceptions
    raised by a task are wrapped in ExceptionInfo and delivered through the
    task's result queue instead of killing the thread.
    """
    def __init__(self, task_queue):
        super(WorkerThread, self).__init__()
        self.task_queue = task_queue

    def run(self):
        while True:
            func, args, result_queue = self.task_queue.get()
            if func is None:
                return
            try:
                outcome = func(*args)
            except:
                # Preserve original behavior: capture *any* failure for the consumer.
                outcome = ExceptionInfo()
            result_queue.put((outcome, args))
#----------------------------------------------------------------------------
class ThreadPool(object):
    """Minimal thread pool: N daemon WorkerThreads draining one shared task
    queue, with one result queue per task function."""
    def __init__(self, num_threads):
        assert num_threads >= 1
        self.task_queue = Queue.Queue()
        self.result_queues = dict()       # func -> Queue of (result, args) pairs
        self.num_threads = num_threads
        for _idx in range(self.num_threads):
            thread = WorkerThread(self.task_queue)
            thread.daemon = True          # don't block interpreter exit
            thread.start()

    def add_task(self, func, args=()):
        """Enqueue func(*args); results are retrieved per-func via get_result()."""
        assert hasattr(func, '__call__') # must be a function
        if func not in self.result_queues:
            self.result_queues[func] = Queue.Queue()
        self.task_queue.put((func, args, self.result_queues[func]))

    def get_result(self, func): # returns (result, args)
        # Blocks until a result for `func` is available; re-raises worker exceptions.
        result, args = self.result_queues[func].get()
        if isinstance(result, ExceptionInfo):
            print('\n\nWorker thread caught an exception:\n' + result.traceback)
            raise result.value
        return result, args

    def finish(self):
        # One None-sentinel per worker shuts the whole pool down.
        for _idx in range(self.num_threads):
            self.task_queue.put((None, (), None))

    def __enter__(self): # for 'with' statement
        return self

    def __exit__(self, *excinfo):
        self.finish()

    def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):
        """Map items through pre_func -> process_func (in workers) -> post_func,
        yielding results in input order while bounding the number in flight."""
        if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
        assert max_items_in_flight >= 1
        results = []        # slot per submitted item; None = not yet retired
        retire_idx = [0]    # next index to yield, boxed for closure mutation

        def task_func(prepared, _idx):
            # Runs in a worker thread.
            return process_func(prepared)

        def retire_result():
            # Pull one finished result, then yield any contiguous prefix that is ready.
            processed, (_prepared, idx) = self.get_result(task_func)
            results[idx] = processed
            while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
                yield post_func(results[retire_idx[0]])
                results[retire_idx[0]] = None
                retire_idx[0] += 1

        for idx, item in enumerate(item_iterator):
            prepared = pre_func(item)
            results.append(None)
            self.add_task(func=task_func, args=(prepared, idx))
            # Throttle: wait for older results once too many items are in flight.
            while retire_idx[0] < idx - max_items_in_flight + 2:
                for res in retire_result(): yield res
        while retire_idx[0] < len(results):
            for res in retire_result(): yield res
#----------------------------------------------------------------------------
def display(tfrecord_dir):
    """Interactively page through a TFRecords dataset in an OpenCV window.

    SPACE/ENTER advances to the next image; ESC (keycode 27) exits.
    """
    print('Loading dataset "%s"' % tfrecord_dir)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()
    import cv2 # pip install opencv-python
    idx = 0
    while True:
        try:
            images, labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            # Dataset exhausted (repeat=False).
            break
        if idx == 0:
            print('Displaying images')
            cv2.namedWindow('dataset_tool')
            print('Press SPACE or ENTER to advance, ESC to exit')
        print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
        cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
        idx += 1
        if cv2.waitKey() == 27:
            # ESC pressed.
            break
    print('\nDisplayed %d images.' % idx)
#----------------------------------------------------------------------------
def extract(tfrecord_dir, output_dir):
    """Dump every image of a TFRecords dataset to PNG files in output_dir."""
    print('Loading dataset "%s"' % tfrecord_dir)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()
    print('Extracting images to "%s"' % output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    idx = 0
    while True:
        if idx % 10 == 0:
            print('%d\r' % idx, end='', flush=True)
        try:
            images, _labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            # Dataset exhausted (repeat=False).
            break
        if images.shape[1] == 1:
            # Single channel => save as grayscale.
            img = PIL.Image.fromarray(images[0][0], 'L')
        else:
            img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
        img.save(os.path.join(output_dir, 'img%08d.png' % idx))
        idx += 1
    print('Extracted %d images.' % idx)
#----------------------------------------------------------------------------
def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
    """Compare two TFRecords datasets image-by-image (and optionally label-by-label),
    printing which entries differ and summary counts at the end."""
    max_label_size = 0 if ignore_labels else 'full'
    print('Loading dataset "%s"' % tfrecord_dir_a)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    print('Loading dataset "%s"' % tfrecord_dir_b)
    dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()
    print('Comparing datasets')
    idx = 0
    identical_images = 0
    identical_labels = 0
    while True:
        if idx % 100 == 0:
            print('%d\r' % idx, end='', flush=True)
        # None marks "exhausted" for each dataset independently.
        try:
            images_a, labels_a = dset_a.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_a, labels_a = None, None
        try:
            images_b, labels_b = dset_b.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_b, labels_b = None, None
        if images_a is None or images_b is None:
            if images_a is not None or images_b is not None:
                # Exactly one ran out first => different lengths.
                print('Datasets contain different number of images')
            break
        if images_a.shape == images_b.shape and np.all(images_a == images_b):
            identical_images += 1
        else:
            print('Image %d is different' % idx)
        if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
            identical_labels += 1
        else:
            print('Label %d is different' % idx)
        idx += 1
    print('Identical images: %d / %d' % (identical_images, idx))
    if not ignore_labels:
        print('Identical labels: %d / %d' % (identical_labels, idx))
#----------------------------------------------------------------------------
def create_mnist(tfrecord_dir, mnist_dir):
    """Convert the raw MNIST training set into a multi-resolution TFRecords dataset."""
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as fh:
        images = np.frombuffer(fh.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as fh:
        labels = np.frombuffer(fh.read(), np.uint8, offset=8)
    # Reshape to NCHW and zero-pad the 28x28 digits to a power-of-two 32x32.
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        shuffled = tfr.choose_shuffled_order()
        for image_idx in shuffled:
            tfr.add_image(images[image_idx])
        tfr.add_labels(onehot[shuffled])
#----------------------------------------------------------------------------
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
    """Synthesize an RGB dataset: each output stacks three random MNIST digits
    as the R, G and B channels."""
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as fh:
        images = np.frombuffer(fh.read(), np.uint8, offset=16)
    # HW layout, zero-padded from 28x28 to 32x32.
    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        rng = np.random.RandomState(random_seed)
        for _ in range(num_images):
            # Three independently drawn digit indices become the three channels.
            tfr.add_image(images[rng.randint(images.shape[0], size=3)])
#----------------------------------------------------------------------------
def create_cifar10(tfrecord_dir, cifar10_dir):
    """Convert the CIFAR-10 python-format training batches into a TFRecords dataset."""
    print('Loading CIFAR-10 from "%s"' % cifar10_dir)
    import pickle
    image_batches = []
    label_batches = []
    for batch_idx in range(1, 6):
        with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch_idx), 'rb') as fh:
            payload = pickle.load(fh, encoding='latin1')
        image_batches.append(payload['data'].reshape(-1, 3, 32, 32))
        label_batches.append(payload['labels'])
    images = np.concatenate(image_batches)
    labels = np.concatenate(label_batches)
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        shuffled = tfr.choose_shuffled_order()
        for image_idx in shuffled:
            tfr.add_image(images[image_idx])
        tfr.add_labels(onehot[shuffled])
#----------------------------------------------------------------------------
def create_cifar100(tfrecord_dir, cifar100_dir):
    """Convert the CIFAR-100 python-format training set (fine labels) into a TFRecords dataset."""
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as fh:
        payload = pickle.load(fh, encoding='latin1')
    images = payload['data'].reshape(-1, 3, 32, 32)
    labels = np.array(payload['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        shuffled = tfr.choose_shuffled_order()
        for image_idx in shuffled:
            tfr.add_image(images[image_idx])
        tfr.add_labels(onehot[shuffled])
#----------------------------------------------------------------------------
def create_svhn(tfrecord_dir, svhn_dir):
    """Convert the pre-pickled SVHN training batches into a TFRecords dataset."""
    print('Loading SVHN from "%s"' % svhn_dir)
    import pickle
    image_batches = []
    label_batches = []
    for batch_idx in range(1, 4):
        with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch_idx), 'rb') as fh:
            payload = pickle.load(fh, encoding='latin1')
        # Each pickle holds an (images, labels) pair.
        image_batches.append(payload[0])
        label_batches.append(payload[1])
    images = np.concatenate(image_batches)
    labels = np.concatenate(label_batches)
    assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (73257,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        shuffled = tfr.choose_shuffled_order()
        for image_idx in shuffled:
            tfr.add_image(images[image_idx])
        tfr.add_labels(onehot[shuffled])
#----------------------------------------------------------------------------
def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
    """Convert an LSUN LMDB database into a square TFRecords dataset.

    Each entry is decoded (cv2 first, PIL as fallback), center-cropped to a
    square, resized to `resolution`, and exported as CHW RGB. Entries that
    fail to decode are skipped with a printed message.
    """
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb # pip install lmdb # pylint: disable=import-error
    import cv2 # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images) as tfr:
            for _idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        # Fall back to PIL for formats cv2 cannot decode.
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))
                    # Center-crop to the largest possible square.
                    crop = np.min(img.shape[:2])
                    img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1]) # HWC => CHW
                    tfr.add_image(img)
                except:
                    # Best-effort conversion: report and keep going.
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break
#----------------------------------------------------------------------------
def create_lsun_wide(tfrecord_dir, lmdb_dir, width=512, height=384, max_images=None):
    """Convert an LSUN LMDB database into a letterboxed wide-aspect TFRecords dataset.

    Images are scaled to `width` x `height` (skipping any too small), then
    centered vertically on a square `width` x `width` black canvas so the
    exporter's square-image requirement still holds.
    """
    assert width == 2 ** int(np.round(np.log2(width)))
    assert height <= width
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb # pip install lmdb # pylint: disable=import-error
    import cv2 # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images, print_progress=False) as tfr:
            for idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        # Fall back to PIL for formats cv2 cannot decode.
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))
                    # Height the image would have after scaling to `width`.
                    ch = int(np.round(width * img.shape[0] / img.shape[1]))
                    if img.shape[1] < width or ch < height:
                        continue
                    # Center-crop vertically to the target aspect ratio.
                    img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((width, height), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1]) # HWC => CHW
                    # Letterbox onto a square canvas.
                    canvas = np.zeros([3, width, width], dtype=np.uint8)
                    canvas[:, (width - height) // 2 : (width + height) // 2] = img
                    tfr.add_image(canvas)
                    print('\r%d / %d => %d ' % (idx + 1, total_images, tfr.cur_images), end='')
                except:
                    # Best-effort conversion: report and keep going.
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break
    print()
#----------------------------------------------------------------------------
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    """Center-crop aligned CelebA PNGs to 128x128 around (cx, cy) and export
    them as a shuffled TFRecords dataset."""
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        for file_idx in tfr.choose_shuffled_order():
            img = np.asarray(PIL.Image.open(image_filenames[file_idx]))
            assert img.shape == (218, 178, 3)
            # 128x128 window centered on (cx, cy), then HWC => CHW.
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            tfr.add_image(img.transpose(2, 0, 1))
#----------------------------------------------------------------------------
def create_celebaHQ(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100, conditioning='none'):
print('Loading CelebA from "%s"' % celeba_dir)
expected_images = 202599
if len(glob.glob(os.path.join(celeba_dir, 'Img/img_celeba', '*.jpg'))) != expected_images:
error('Expected to find %d images' % expected_images)
with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
landmarks = np.float32(landmarks).reshape(-1, 5, 2)
if conditioning == 'binary':
print('Loading binary attributes')
with open(os.path.join(celeba_dir, 'Anno', 'list_attr_celeba.txt'), 'rt') as f:
# Load binary attributes
attributes = np.array([[float(x) if float(x) == 1.0 else 0.0 for x in l.split()[1:]] for l in f.readlines()[2:]], dtype=np.float32)
elif conditioning == 'textual':
print('Loading textual descriptions')
# Load textual descriptions
# TODO: how to handle multiple descriptions???
raise NotImplementedError
print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
import scipy.ndimage
import hashlib
import bz2
import zipfile
import base64
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.kdf.pbkdf2
import cryptography.fernet
expected_zips = 30
if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
error('Expected to find %d zips' % expected_zips)
with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
lines = [line.split() for line in file]
fields = dict()
for idx, field in enumerate(lines[0]):
type = int if field.endswith('idx') else str
fields[field] = [type(line[idx]) for line in lines[1:]]
indices = np.array(fields['idx'])
# Must use pillow version 3.1.1 for everything to work correctly.
if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
# Must use libjpeg version 8d for everything to work correctly.
img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'Img/img_celeba', '000001.jpg')))
md5 = hashlib.md5()
md5.update(img.tobytes())
if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
def rot90(v):
return np.array([-v[1], v[0]])
    def process_func(idx):
        """Reconstruct CelebA-HQ image number *idx* as a uint8 CHW array.

        Rebuilds the 1024x1024 aligned image from the original CelebA JPG
        plus an encrypted delta file, verifying MD5 checksums (from
        image_list.txt) both before and after applying the delta. Relies on
        the enclosing scope for `fields`, `landmarks`, `celeba_dir`,
        `delta_dir` and `rot90`.
        """
        # Load original image.
        orig_idx = fields['orig_idx'][idx]
        orig_file = fields['orig_file'][idx]
        # NOTE(review): path is 'img_celeba' here, but 'Img/img_celeba' was
        # used for the libjpeg check earlier -- confirm directory layout.
        orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
        img = PIL.Image.open(orig_path)
        # Choose oriented crop rectangle from the five facial landmarks
        # (eye-to-eye and eye-to-mouth vectors define scale and rotation).
        lm = landmarks[orig_idx]
        eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
        mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
        eye_to_eye = lm[1] - lm[0]
        eye_to_mouth = mouth_avg - eye_avg
        x = eye_to_eye - rot90(eye_to_mouth)
        x /= np.hypot(*x)
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
        y = rot90(x)
        c = eye_avg + eye_to_mouth * 0.1
        quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
        zoom = 1024 / (np.hypot(*x) * 2)
        # Shrink: pre-downsample when the crop is much larger than the output.
        shrink = int(np.floor(0.5 / zoom))
        if shrink > 1:
            size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
            img = img.resize(size, PIL.Image.ANTIALIAS)
            quad /= shrink
            zoom *= shrink
        # Crop to the quad's bounding box plus a safety border.
        border = max(int(np.round(1024 * 0.1 / zoom)), 3)
        crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
        crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
        if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
            img = img.crop(crop)
            quad -= crop[0:2]
        # Simulate super-resolution: upscale by a power of two so zoom <= 1.
        superres = int(np.exp2(np.ceil(np.log2(zoom))))
        if superres > 1:
            img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
            quad *= superres
            zoom /= superres
        # Pad with reflected content, then blur/median-blend the padded rim
        # so the reflection seam is not visible. Note: reuses x/y as mgrid
        # coordinates here, clobbering the earlier quad vectors (intentional).
        pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
        pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
        if max(pad) > border - 4:
            pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
            img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
            h, w, _ = img.shape
            y, x, _ = np.mgrid[:h, :w, :1]
            mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
            blur = 1024 * 0.02 / zoom
            img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
            img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
            img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
            quad += pad[0:2]
        # Transform: map the oriented quad onto a 4096x4096 canvas, then
        # downsample to the final 1024x1024 output (CHW for the exporter).
        img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
        img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
        img = np.asarray(img).transpose(2, 0, 1)
        # Verify MD5 of the processed image (pre-delta).
        md5 = hashlib.md5()
        md5.update(img.tobytes())
        assert md5.hexdigest() == fields['proc_md5'][idx]
        # Load delta image (zips are batched 1000 images each) and original JPG.
        with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
            delta_bytes = zip.read('delta%05d.dat' % idx)
        with open(orig_path, 'rb') as file:
            orig_bytes = file.read()
        # Decrypt delta image, using original JPG data as decryption key
        # (PBKDF2-HMAC-SHA256 with the original filename as salt, Fernet cipher).
        algorithm = cryptography.hazmat.primitives.hashes.SHA256()
        backend = cryptography.hazmat.backends.default_backend()
        salt = bytes(orig_file, 'ascii')
        kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
        key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
        delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
        # Apply delta image (uint8 addition; wraparound matches how deltas were made).
        img = img + delta
        # Verify MD5 of the final image.
        md5 = hashlib.md5()
        md5.update(img.tobytes())
        assert md5.hexdigest() == fields['final_md5'][idx]
        return img
print('Saving')
quit()
with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
order = tfr.choose_shuffled_order()
with ThreadPool(num_threads) as pool:
for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks):
tfr.add_image(img)
if conditioning == 'binary':
tfr.add_labels(attributes[order])
elif conditioning == 'textual':
tfr.add_text_description()
# TODO: test (i.e., load some images and check their attributes. Does it match?)
#----------------------------------------------------------------------------
def create_CUB(tfrecord_dir, CUB_dir):
    """Create a TFRecords dataset for the CUB birds dataset.

    Not implemented yet; always raises NotImplementedError.
    """
    # TODO: include tags and features (?)
    raise NotImplementedError
#----------------------------------------------------------------------------
def create_coco(tfrecord_dir, coco_dir, res=256, type='test'):
    """Create a TFRecords dataset of MSCOCO caption embeddings.

    Loads pre-extracted '<type>_filenames.npy' / '<type>_caption_features.npy'
    from `coco_dir`, stores all embeddings in a '<tfrecord_dir>_<type>'
    dataset, and saves the bare image filenames to 'fns.npy'. Image export is
    currently disabled (commented out); `res` is only used by that dead code.
    NOTE(review): parameter `type` shadows the builtin; kept for interface
    compatibility.
    """
    import json
    import pickle
    tfrecord_dir = tfrecord_dir+'_{}'.format(type)
    # annotations = json.load(open(os.path.join(coco_dir, 'annotations/instances_train2014.json')))
    filenames = np.load(os.path.join(coco_dir, '{}_filenames.npy'.format(type)))
    embeddings = np.load(os.path.join(coco_dir, '{}_caption_features.npy'.format(type)))
    fns = []
    # Embeddings and filenames must be index-aligned.
    assert len(filenames) == len(embeddings)
    with TFRecordExporter(tfrecord_dir, len(embeddings)) as tfr:
        for i, fn in enumerate(filenames):
            # img = PIL.Image.open(os.path.join(coco_dir,'{}2014'.format(type),filenames[i][0]))
            # img = np.asarray(img.resize((res, res)))
            #
            # channels = img.shape[2] if img.ndim == 3 else 1
            #
            # if channels == 1:
            # new_img = np.zeros((3, img.shape[0], img.shape[1]))
            # new_img[0], new_img[1], new_img[2] = img, img, img
            # img = new_img
            #
            # else:
            # img = img.transpose([2, 0, 1]) # HWC => CHW
            # tfr.add_image(img)
            # Save filenames (can be used with index)
            fns.append(fn[0][fn[0].rindex('/')+1:])
        # Add all embeddings
        tfr.add_sentence_embedding(embeddings.astype(np.float32))
    # NOTE(review): writes to a hard-coded 'fns.npy' in the current working
    # directory rather than under tfrecord_dir -- confirm this is intended.
    np.save(open('fns.npy', 'wb'), fns)
#----------------------------------------------------------------------------
def create_from_images(tfrecord_dir, image_dir, shuffle, resolution=512, max_images=4000000000):
    """Create a TFRecords dataset from a directory full of images.

    Args:
        tfrecord_dir: New dataset directory to be created.
        image_dir:    Directory containing the input images.
        shuffle:      If truthy, export the images in a shuffled order.
        resolution:   Output resolution; must be a power of two.
        max_images:   Stop once this many images have been exported.

    Images that fail to load/convert are reported and skipped.
    """
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')
    # Validate channel count and squareness against the first image.
    image = PIL.Image.open(image_filenames[0])
    img = np.asarray(image)
    res = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != res:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        for idx in range(order.size):
            try:
                img = PIL.Image.open(image_filenames[order[idx]])
                # Resize any image that is not already at the target size
                # (the old code decided from the first image's size only,
                # which broke on mixed-size directories).
                if img.size != (resolution, resolution):
                    img = img.resize((resolution, resolution))
                img = np.asarray(img)
                if channels == 1:
                    img = img[np.newaxis, :, :] # HW => CHW
                else:
                    img = img.transpose([2, 0, 1]) # HWC => CHW
                tfr.add_image(img)
            except Exception as exc:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate; report the cause and continue.
                print("Exception in " + image_filenames[order[idx]] + ": " + repr(exc))
            if (tfr.cur_images >= max_images):
                break
#----------------------------------------------------------------------------
def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):
    """Convert a legacy HDF5 archive (plus optional '-labels.npy') to TFRecords.

    Picks the highest-resolution 'data*' dataset in the archive (largest
    shape[3], i.e. widest images assuming NCHW layout) and exports every
    image, optionally in a shuffled order chosen by the exporter.
    """
    print('Loading HDF5 archive from "%s"' % hdf5_filename)
    import h5py # conda install h5py
    with h5py.File(hdf5_filename, 'r') as hdf5_file:
        # Select the 'data*' entry with the largest last dimension (highest LOD).
        hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])
        with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
            order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])
            for idx in range(order.size):
                tfr.add_image(hdf5_data[order[idx]])
            # Attach labels from '<archive>-labels.npy' if present, reordered to match.
            npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'
            if os.path.isfile(npy_filename):
                tfr.add_labels(np.load(npy_filename)[order])
#----------------------------------------------------------------------------
def execute_cmdline(argv):
    """Parse the command line and dispatch to the chosen sub-command.

    Each sub-command name must exactly match a module-level function; the
    remaining parsed arguments are forwarded to it as keyword arguments.
    """
    prog = argv[0]
    parser = argparse.ArgumentParser(
        prog = prog,
        description = 'Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.',
        epilog = 'Type "%s <command> -h" for more information.' % prog)
    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True
    # Helper: register one sub-command with a shared usage-example epilog.
    def add_command(cmd, desc, example=None):
        epilog = 'Example: %s %s' % (prog, example) if example is not None else None
        return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)
    p = add_command( 'display', 'Display images in dataset.',
        'display datasets/mnist')
    p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
    p = add_command( 'extract', 'Extract images from dataset.',
        'extract datasets/mnist mnist-images')
    p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
    p.add_argument( 'output_dir', help='Directory to extract the images into')
    p = add_command( 'compare', 'Compare two datasets.',
        'compare datasets/mydataset datasets/mnist')
    p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')
    p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')
    p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)
    p = add_command( 'create_mnist', 'Create dataset for MNIST.',
        'create_mnist datasets/mnist ~/downloads/mnist')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'mnist_dir', help='Directory containing MNIST')
    p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',
        'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'mnist_dir', help='Directory containing MNIST')
    p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)
    p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)
    p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',
        'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')
    p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',
        'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')
    p = add_command( 'create_svhn', 'Create dataset for SVHN.',
        'create_svhn datasets/svhn ~/downloads/svhn')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'svhn_dir', help='Directory containing SVHN')
    p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',
        'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
    p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256)
    p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)
    p = add_command( 'create_lsun_wide', 'Create LSUN dataset with non-square aspect ratio.',
        'create_lsun_wide datasets/lsun-car-512x384 ~/downloads/lsun/car_lmdb --width 512 --height 384')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
    p.add_argument( '--width', help='Output width (default: 512)', type=int, default=512)
    p.add_argument( '--height', help='Output height (default: 384)', type=int, default=384)
    p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)
    p = add_command( 'create_celeba', 'Create dataset for CelebA.',
        'create_celeba datasets/celeba ~/downloads/celeba')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'celeba_dir', help='Directory containing CelebA')
    p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)
    p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)
    # NOTE(review): this sub-command string is looked up directly in globals()
    # below, so 'create_celebaHQ' must match the handler's exact (case-
    # sensitive) function name -- confirm it is not defined as create_celebahq.
    p = add_command( 'create_celebaHQ', 'Create dataset for CelebA-HQ',
        'create_celebahq datasets/celeba ~/downloads/celebahq')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'celeba_dir', help='Directory containing CelebA')
    p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
    # p.add_argument( 'num_threads', help='Number of threads to use (default: 4)', type=int, default=4)
    # p.add_argument( 'num_tasks', help='Number of tasks to perform in parallel (default: 100)', type=int, default=100)
    p.add_argument( 'conditioning', help='Type of conditioning (default: "none")', type=str, default='none')
    # p = add_command( 'create_CUB', 'Create dataset for CUB birds',
    # 'create_CUB datasets/CUB ~/downloads/CUB')
    p = add_command( 'create_coco', 'Create dataset for MSCOCO',
        'create_COCO datasets/coco ~/downloads/coco')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'coco_dir', help='Directory containing MSCOCO')
    p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',
        'create_from_images datasets/mydataset myimagedir')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'image_dir', help='Directory containing the images')
    p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
    p.add_argument( '--resolution', help='Output resolution (default: 512)', type=int, default=512)
    p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=4000000000)
    p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',
        'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')
    p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
    # With no arguments, show the help text instead of failing.
    args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
    # Dispatch: the sub-command name selects a module-level function, which
    # receives all remaining parsed options as keyword arguments.
    func = globals()[args.command]
    del args.command
    func(**vars(args))
#----------------------------------------------------------------------------
# Script entry point: run the dataset command-line tool.
if __name__ == "__main__":
    execute_cmdline(sys.argv)
#----------------------------------------------------------------------------
| 42,105 | 45.372247 | 163 | py |
stylegan-encoder | stylegan-encoder-master/train.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main entry point for training StyleGAN and ProGAN networks."""
import copy
import dnnlib
from dnnlib import EasyDict
import config
from metrics import metric_base
#----------------------------------------------------------------------------
# Official training configs for StyleGAN, targeted mainly for FFHQ.
if 1:
desc = 'sgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='4k', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset.
desc += '-ffhq'; dataset = EasyDict(tfrecord_dir='ffhq'); train.mirror_augment = True
# desc += '-cub'; dataset = EasyDict(tfrecord_dir='CUB'); train.mirror_augment = True
# desc += 'celebahq-binary'; dataset = EasyDict(tfrecord_dir='celebahq-binary', resolution=256); train.mirror_augment = True
#desc += 'coco_train'; dataset = EasyDict(tfrecord_dir='coco_train', resolution=256); train.mirror_augment = True
#desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-full'); train.mirror_augment = False
#desc += '-car'; dataset = EasyDict(tfrecord_dir='lsun-car-512x384'); train.mirror_augment = False
#desc += '-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-full'); train.mirror_augment = False
# Number of GPUs.
#desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}
#desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}
#desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}
desc += '-8gpu'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}
# Class conditioning
# desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
# desc += '-cond1'; dataset.max_label_size = 128 # conditioned on first component of the label
#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
# Default options.
train.total_kimg = 25000
sched.lod_initial_resolution = 8
sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# WGAN-GP loss for CelebA-HQ.
#desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# Table 1.
#desc += '-tuned-baseline'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-mapping-and-styles'; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-remove-traditional-input'; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-noise-inputs'; G.style_mixing_prob = 0.0
#desc += '-mixing-regularization' # default
# Table 2.
#desc += '-mix0'; G.style_mixing_prob = 0.0
#desc += '-mix50'; G.style_mixing_prob = 0.5
#desc += '-mix90'; G.style_mixing_prob = 0.9 # default
#desc += '-mix100'; G.style_mixing_prob = 1.0
# Table 4.
#desc += '-traditional-0'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-traditional-8'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 8; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-stylebased-0'; G.mapping_layers = 0
#desc += '-stylebased-1'; G.mapping_layers = 1
#desc += '-stylebased-2'; G.mapping_layers = 2
#desc += '-stylebased-8'; G.mapping_layers = 8 # default
#----------------------------------------------------------------------------
# Official training configs for Progressive GAN, targeted mainly for CelebA-HQ.
if 0:
desc = 'pgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_progan.G_paper') # Options for generator network.
D = EasyDict(func_name='training.networks_progan.D_paper') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_wgan') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_wgan_gp') # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset (choose one).
desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True
#desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10')
#desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100')
#desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn')
#desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist')
#desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb')
#desc += '-syn1024rgb'; dataset = EasyDict(class_name='training.dataset.SyntheticDataset', resolution=1024, num_channels=3)
#desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True
#desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True
#desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True
#desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True
#desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True
#desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True
#desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True
#desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True
#desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True
#desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True
#desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True
#desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True
#desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True
#desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True
#desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True
#desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True
#desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True
#desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True
#desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True
#desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True
#desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True
#desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True
#desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True
#desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True
#desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True
#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True
#desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True
#desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True
#desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True
#desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True
# Conditioning & snapshot options.
#desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
#desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label
#desc += '-g4k'; grid.size = '4k'
#desc += '-grpc'; grid.layout = 'row_per_class'
# Config presets (choose one).
#desc += '-preset-v1-1gpu'; submit_config.num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000
desc += '-preset-v2-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-2gpus'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-4gpus'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-8gpus'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
# Numerical precision (choose one).
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
# Disable individual features.
#desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000
#desc += '-nopixelnorm'; G.use_pixelnorm = False
#desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False
#desc += '-noleakyrelu'; G.use_leakyrelu = False
#desc += '-nosmoothing'; train.G_smoothing_kimg = 0.0
#desc += '-norepeat'; train.minibatch_repeats = 1
#desc += '-noreset'; train.reset_opt_for_new_lod = False
# Special modes.
#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100
#desc += '-GRAPH'; train.save_tf_graph = True
#desc += '-HIST'; train.save_weight_histograms = True
#----------------------------------------------------------------------------
# Main entry point for training.
# Calls the function indicated by 'train' using the selected options.
def main():
    """Assemble the training configuration and submit the run via dnnlib.

    Reads the module-level config objects chosen above (train, G, D,
    optimizers, losses, dataset, sched, grid, metrics, tf_config,
    submit_config, desc) and forwards them to dnnlib.submit_run().
    """
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    # Deep-copy so the submitted run cannot mutate the module-level template.
    kwargs.submit_config = copy.deepcopy(submit_config)
    kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
    kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
#----------------------------------------------------------------------------
# Script entry point: launch training with the configuration selected above.
if __name__ == "__main__":
    main()
#----------------------------------------------------------------------------
| 17,124 | 84.625 | 302 | py |
stylegan-encoder | stylegan-encoder-master/encode_images.py | import os
import argparse
import pickle
from tqdm import tqdm
import PIL.Image
from PIL import ImageFilter
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel, load_images
#from tensorflow.keras.models import load_model
from keras.models import load_model
from keras.applications.resnet50 import preprocess_input
def split_to_batches(l, n):
    """Yield successive slices of *l*, each containing at most *n* items."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def str2bool(v):
    """Parse a command-line value into a bool.

    Real bools pass through unchanged; strings are matched
    case-insensitively against common yes/no spellings. Anything else
    raises argparse.ArgumentTypeError (so argparse reports a clean error).
    """
    if isinstance(v, bool):
        return v
    text = v.lower()
    if text in ('yes', 'true', 't', 'y', '1'):
        return True
    if text in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
    """Encode reference images into StyleGAN dlatent space.

    For each image in `src_dir`: optionally initialize dlatents with a
    ResNet/EfficientNet approximator (or previously saved dlatents), then
    optimize the dlatents against a weighted mix of perceptual losses, and
    finally write the reconstructed image and its dlatents to disk.

    Fixes vs. the original:
      * `--average_best_loss` is now honored (the mixing weight was
        hard-coded to 0.25/0.75 and the flag was ignored).
      * `vid_count` is incremented each iteration so `--video_skip`
        actually skips frames (it was never incremented, so every frame
        was written regardless of the flag).
      * The batch progress bar uses ceiling division so a final partial
        batch is counted.
    """
    parser = argparse.ArgumentParser(description='Find latent representation of reference images using perceptual losses', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('src_dir', help='Directory with images for encoding')
    parser.add_argument('generated_images_dir', help='Directory for storing generated images')
    parser.add_argument('dlatent_dir', help='Directory for storing dlatent representations')
    parser.add_argument('--data_dir', default='data', help='Directory for storing optional models')
    parser.add_argument('--mask_dir', default='masks', help='Directory for storing optional masks')
    parser.add_argument('--load_last', default='', help='Start with embeddings from directory')
    parser.add_argument('--dlatent_avg', default='', help='Use dlatent from file specified here for truncation instead of dlatent_avg from Gs')
    parser.add_argument('--model_url', default='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', help='Fetch a StyleGAN model to train on from this URL') # karras2019stylegan-ffhq-1024x1024.pkl
    parser.add_argument('--model_res', default=1024, help='The dimension of images in the StyleGAN model', type=int)
    parser.add_argument('--batch_size', default=1, help='Batch size for generator and perceptual model', type=int)
    parser.add_argument('--optimizer', default='ggt', help='Optimization algorithm used for optimizing dlatents')

    # Perceptual model params
    parser.add_argument('--image_size', default=256, help='Size of images for perceptual model', type=int)
    parser.add_argument('--resnet_image_size', default=256, help='Size of images for the Resnet model', type=int)
    parser.add_argument('--lr', default=0.25, help='Learning rate for perceptual model', type=float)
    parser.add_argument('--decay_rate', default=0.9, help='Decay rate for learning rate', type=float)
    parser.add_argument('--iterations', default=100, help='Number of optimization steps for each batch', type=int)
    parser.add_argument('--decay_steps', default=4, help='Decay steps for learning rate decay (as a percent of iterations)', type=float)
    parser.add_argument('--early_stopping', default=True, help='Stop early once training stabilizes', type=str2bool, nargs='?', const=True)
    parser.add_argument('--early_stopping_threshold', default=0.5, help='Stop after this threshold has been reached', type=float)
    parser.add_argument('--early_stopping_patience', default=10, help='Number of iterations to wait below threshold', type=int)
    parser.add_argument('--load_effnet', default='data/finetuned_effnet.h5', help='Model to load for EfficientNet approximation of dlatents')
    parser.add_argument('--load_resnet', default='data/finetuned_resnet.h5', help='Model to load for ResNet approximation of dlatents')
    parser.add_argument('--use_preprocess_input', default=True, help='Call process_input() first before using feed forward net', type=str2bool, nargs='?', const=True)
    parser.add_argument('--use_best_loss', default=True, help='Output the lowest loss value found as the solution', type=str2bool, nargs='?', const=True)
    parser.add_argument('--average_best_loss', default=0.25, help='Do a running weighted average with the previous best dlatents found', type=float)
    parser.add_argument('--sharpen_input', default=True, help='Sharpen the input images', type=str2bool, nargs='?', const=True)

    # Loss function options
    parser.add_argument('--use_vgg_loss', default=0.4, help='Use VGG perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_vgg_layer', default=9, help='Pick which VGG layer to use.', type=int)
    parser.add_argument('--use_pixel_loss', default=1.5, help='Use logcosh image pixel loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_mssim_loss', default=200, help='Use MS-SIM perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_lpips_loss', default=100, help='Use LPIPS perceptual loss; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_l1_penalty', default=0.5, help='Use L1 penalty on latents; 0 to disable, > 0 to scale.', type=float)
    parser.add_argument('--use_discriminator_loss', default=0.5, help='Use trained discriminator to evaluate realism.', type=float)
    parser.add_argument('--use_adaptive_loss', default=False, help='Use the adaptive robust loss function from Google Research for pixel and VGG feature loss.', type=str2bool, nargs='?', const=True)

    # Generator params
    parser.add_argument('--randomize_noise', default=False, help='Add noise to dlatents during optimization', type=str2bool, nargs='?', const=True)
    parser.add_argument('--tile_dlatents', default=False, help='Tile dlatents to use a single vector at each scale', type=str2bool, nargs='?', const=True)
    parser.add_argument('--clipping_threshold', default=2.0, help='Stochastic clipping of gradient values outside of this threshold', type=float)

    # Masking params
    parser.add_argument('--load_mask', default=False, help='Load segmentation masks', type=str2bool, nargs='?', const=True)
    parser.add_argument('--face_mask', default=True, help='Generate a mask for predicting only the face area', type=str2bool, nargs='?', const=True)
    parser.add_argument('--use_grabcut', default=True, help='Use grabcut algorithm on the face mask to better segment the foreground', type=str2bool, nargs='?', const=True)
    parser.add_argument('--scale_mask', default=1.4, help='Look over a wider section of foreground for grabcut', type=float)
    parser.add_argument('--composite_mask', default=True, help='Merge the unmasked area back into the generated image', type=str2bool, nargs='?', const=True)
    parser.add_argument('--composite_blur', default=8, help='Size of blur filter to smoothly composite the images', type=int)

    # Video params
    parser.add_argument('--video_dir', default='videos', help='Directory for storing training videos')
    parser.add_argument('--output_video', default=False, help='Generate videos of the optimization process', type=bool)
    parser.add_argument('--video_codec', default='MJPG', help='FOURCC-supported video codec name')
    parser.add_argument('--video_frame_rate', default=24, help='Video frames per second', type=int)
    parser.add_argument('--video_size', default=512, help='Video size in pixels', type=int)
    parser.add_argument('--video_skip', default=1, help='Only write every n frames (1 = write every frame)', type=int)
    args, other_args = parser.parse_known_args()

    args.decay_steps *= 0.01 * args.iterations # Calculate steps as a percent of total iterations

    if args.output_video:
        import cv2
        # NOTE(review): synthesis_kwargs is never read below -- presumably kept
        # for parity with sibling scripts; confirm before deleting.
        synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=args.batch_size)

    ref_images = [os.path.join(args.src_dir, x) for x in os.listdir(args.src_dir)]
    ref_images = list(filter(os.path.isfile, ref_images))

    if len(ref_images) == 0:
        raise Exception('%s is empty' % args.src_dir)

    os.makedirs(args.data_dir, exist_ok=True)
    os.makedirs(args.mask_dir, exist_ok=True)
    os.makedirs(args.generated_images_dir, exist_ok=True)
    os.makedirs(args.dlatent_dir, exist_ok=True)
    os.makedirs(args.video_dir, exist_ok=True)

    # Initialize generator and perceptual model
    tflib.init_tf()
    with dnnlib.util.open_url(args.model_url, cache_dir=config.cache_dir) as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)

    generator = Generator(Gs_network, args.batch_size, clipping_threshold=args.clipping_threshold, tiled_dlatent=args.tile_dlatents, model_res=args.model_res, randomize_noise=args.randomize_noise)
    if (args.dlatent_avg != ''):
        generator.set_dlatent_avg(np.load(args.dlatent_avg))

    perc_model = None
    if (args.use_lpips_loss > 0.00000001):
        with dnnlib.util.open_url('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2', cache_dir=config.cache_dir) as f:
            perc_model = pickle.load(f)
    perceptual_model = PerceptualModel(args, perc_model=perc_model, batch_size=args.batch_size)
    perceptual_model.build_perceptual_model(generator, discriminator_network)
    ff_model = None

    # Optimize (only) dlatents by minimizing perceptual loss between reference and generated images in feature space.
    # Ceiling division so a final partial batch is counted in the progress bar.
    for images_batch in tqdm(split_to_batches(ref_images, args.batch_size), total=-(-len(ref_images) // args.batch_size)):
        names = [os.path.splitext(os.path.basename(x))[0] for x in images_batch]
        if args.output_video:
            video_out = {}
            for name in names:
                video_out[name] = cv2.VideoWriter(os.path.join(args.video_dir, f'{name}.avi'),cv2.VideoWriter_fourcc(*args.video_codec), args.video_frame_rate, (args.video_size,args.video_size))

        perceptual_model.set_reference_images(images_batch)
        dlatents = None
        if (args.load_last != ''): # load previous dlatents for initialization
            for name in names:
                dl = np.expand_dims(np.load(os.path.join(args.load_last, f'{name}.npy')),axis=0)
                if (dlatents is None):
                    dlatents = dl
                else:
                    dlatents = np.vstack((dlatents,dl))
        else:
            if (ff_model is None):
                if os.path.exists(args.load_resnet):
                    from keras.applications.resnet50 import preprocess_input
                    print("Loading ResNet Model:")
                    ff_model = load_model(args.load_resnet)
            if (ff_model is None):
                if os.path.exists(args.load_effnet):
                    import efficientnet
                    from efficientnet import preprocess_input
                    print("Loading EfficientNet Model:")
                    ff_model = load_model(args.load_effnet)
            if (ff_model is not None): # predict initial dlatents with ResNet model
                if (args.use_preprocess_input):
                    dlatents = ff_model.predict(preprocess_input(load_images(images_batch,image_size=args.resnet_image_size)))
                else:
                    dlatents = ff_model.predict(load_images(images_batch,image_size=args.resnet_image_size))
        if dlatents is not None:
            generator.set_dlatents(dlatents)
        op = perceptual_model.optimize(generator.dlatent_variable, iterations=args.iterations, use_optimizer=args.optimizer)
        pbar = tqdm(op, leave=False, total=args.iterations)
        vid_count = 0
        best_loss = None
        best_dlatent = None
        avg_loss_count = 0
        if args.early_stopping:
            avg_loss = prev_loss = None
        for loss_dict in pbar:
            if args.early_stopping: # early stopping feature
                if prev_loss is not None:
                    if avg_loss is not None:
                        # NOTE(review): this is a leaky running sum of loss deltas
                        # (the new delta is not scaled by 0.5), not a true EMA;
                        # kept as-is since the threshold was tuned against it.
                        avg_loss = 0.5 * avg_loss + (prev_loss - loss_dict["loss"])
                        if avg_loss < args.early_stopping_threshold: # count while under threshold; else reset
                            avg_loss_count += 1
                        else:
                            avg_loss_count = 0
                        if avg_loss_count > args.early_stopping_patience: # stop once threshold is reached
                            print("")
                            break
                    else:
                        avg_loss = prev_loss - loss_dict["loss"]
            pbar.set_description(" ".join(names) + ": " + "; ".join(["{} {:.4f}".format(k, v) for k, v in loss_dict.items()]))
            if best_loss is None or loss_dict["loss"] < best_loss:
                if best_dlatent is None or args.average_best_loss <= 0.00000001:
                    best_dlatent = generator.get_dlatents()
                else:
                    # Fix: honor --average_best_loss (was hard-coded 0.25/0.75).
                    best_dlatent = args.average_best_loss * best_dlatent + (1 - args.average_best_loss) * generator.get_dlatents()
                if args.use_best_loss:
                    generator.set_dlatents(best_dlatent)
                best_loss = loss_dict["loss"]
            if args.output_video and (vid_count % args.video_skip == 0):
                batch_frames = generator.generate_images()
                for i, name in enumerate(names):
                    video_frame = PIL.Image.fromarray(batch_frames[i], 'RGB').resize((args.video_size,args.video_size),PIL.Image.LANCZOS)
                    video_out[name].write(cv2.cvtColor(np.array(video_frame).astype('uint8'), cv2.COLOR_RGB2BGR))
            # Fix: advance the frame counter so --video_skip takes effect
            # (previously vid_count stayed 0 and every frame was written).
            vid_count += 1
            generator.stochastic_clip_dlatents()
            prev_loss = loss_dict["loss"]
        if not args.use_best_loss:
            best_loss = prev_loss
        print(" ".join(names), " Loss {:.4f}".format(best_loss))

        if args.output_video:
            for name in names:
                video_out[name].release()

        # Generate images from found dlatents and save them
        if args.use_best_loss:
            generator.set_dlatents(best_dlatent)
        generated_images = generator.generate_images()
        generated_dlatents = generator.get_dlatents()
        for img_array, dlatent, img_path, img_name in zip(generated_images, generated_dlatents, images_batch, names):
            mask_img = None
            if args.composite_mask and (args.load_mask or args.face_mask):
                _, im_name = os.path.split(img_path)
                mask_img = os.path.join(args.mask_dir, f'{im_name}')
            if args.composite_mask and mask_img is not None and os.path.isfile(mask_img):
                # Blend the generated face region back into the original photo
                # using a blurred grayscale mask.
                orig_img = PIL.Image.open(img_path).convert('RGB')
                width, height = orig_img.size
                imask = PIL.Image.open(mask_img).convert('L').resize((width, height))
                imask = imask.filter(ImageFilter.GaussianBlur(args.composite_blur))
                mask = np.array(imask)/255
                mask = np.expand_dims(mask,axis=-1)
                img_array = mask*np.array(img_array) + (1.0-mask)*np.array(orig_img)
                img_array = img_array.astype(np.uint8)
            img = PIL.Image.fromarray(img_array, 'RGB')
            img.save(os.path.join(args.generated_images_dir, f'{img_name}.png'), 'PNG')
            np.save(os.path.join(args.dlatent_dir, f'{img_name}.npy'), dlatent)
        generator.reset_dlatents()
# Script entry point.
if __name__ == "__main__":
    main()
| 15,281 | 62.14876 | 211 | py |
stylegan-encoder | stylegan-encoder-master/robust_loss/distribution.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the distribution corresponding to the loss function.
This library implements the parts of Section 2 of "A General and Adaptive Robust
Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077, that are
required for evaluating the negative log-likelihood (NLL) of the distribution
and for sampling from the distribution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import mpmath
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from robust_loss import cubic_spline
from robust_loss import general
from robust_loss import util
def analytical_base_partition_function(numer, denom):
  r"""Accurately approximate the partition function Z(numer / denom).

  Evaluates the analytical (Meijer-G) form of the partition function
  Z(alpha) from "A General and Adaptive Robust Loss Function" for a
  positive rational alpha = numer / denom. This is slow and not
  differentiable, so it exists only for unit-test ground truth.

  Args:
    numer: the numerator of alpha, an integer >= 0.
    denom: the denominator of alpha, an integer > 0.

  Returns:
    Z(numer / denom) as a double-precision float, accurate to roughly nine
    digits of precision.

  Raises:
    ValueError: if `numer` is not a non-negative integer or `denom` is not
      a positive integer.
  """
  if not isinstance(numer, numbers.Integral):
    raise ValueError('Expected `numer` of type int, but is of type {}'.format(
        type(numer)))
  if not isinstance(denom, numbers.Integral):
    raise ValueError('Expected `denom` of type int, but is of type {}'.format(
        type(denom)))
  if numer < 0:
    raise ValueError('Expected `numer` >= 0, but is = {}'.format(numer))
  if denom <= 0:
    raise ValueError('Expected `denom` > 0, but is = {}'.format(denom))

  alpha = numer / denom
  # The Meijer-G formulation is singular at alpha = 0 and alpha = 2, where
  # the partition function has simple closed forms.
  if alpha == 0:
    return np.pi * np.sqrt(2)
  if alpha == 2:
    return np.sqrt(2 * np.pi)

  # General case: Z(n/d) as described in the paper.
  ratio = 2 * denom / numer - 1.
  a_list = list(np.arange(1, numer, dtype=np.float64) / numer)
  b_list = (list((np.arange(-0.5, numer - 0.5, dtype=np.float64)) / numer) +
            list(np.arange(1, 2 * denom, dtype=np.float64) / (2 * denom)))
  z_arg = (1. / numer - 1. / (2 * denom))**(2 * denom)
  prefactor = (np.exp(np.abs(ratio)) * np.sqrt(np.abs(ratio)) *
               (2 * np.pi)**(1 - denom))
  return prefactor * np.float64(mpmath.meijerg([[], a_list], [b_list, []],
                                               z_arg))
def partition_spline_curve(alpha):
  """Compress alpha >= 0 into a short range for spline interpolation.

  A hand-crafted, continuously differentiable curve applied to alpha before
  it is used as the x-coordinate of the partition-function spline. It is
  roughly linear on [0, 4] (slope ~1 at the endpoints, ~10 near alpha=2,
  where Z(alpha) is most interesting) and logarithmic beyond 4. Sample
  (input, output) pairs:
  [(0, 0), (1, ~1.2), (2, 4), (3, ~6.8), (4, 8), (8, ~8.8), (400000, ~12)]

  Args:
    alpha: A numpy array or TF tensor (float32 or float64) with values >= 0.

  Returns:
    Curved values >= 0 of the same type as `alpha`, to be used as spline
    x-coordinates.
  """
  cast = lambda v: tf.cast(v, alpha.dtype)
  nonneg_check = tf.Assert(tf.reduce_all(alpha >= 0.), [alpha])
  with tf.control_dependencies([nonneg_check]):
    near = (cast(2.25) * alpha - cast(4.5)) / (
        tf.abs(alpha - cast(2)) + cast(0.25)) + alpha + cast(2)
    far = cast(5) / cast(18) * util.log_safe(cast(4) * alpha - cast(15)) + cast(8)
    return tf.where(alpha < 4, near, far)
def inv_partition_spline_curve(x):
  """Map spline x-coordinates back to alpha; inverse of partition_spline_curve()."""
  cast = lambda v: tf.cast(v, x.dtype)
  nonneg_check = tf.Assert(tf.reduce_all(x >= 0.), [x])
  with tf.control_dependencies([nonneg_check]):
    quarter_x_sq = cast(.25) * tf.square(x)
    # Inverses of the two rational pieces on [0, 4] and (4, 8).
    low_piece = cast(1.25) - tf.sqrt(cast(1.5625) - x + quarter_x_sq)
    high_piece = cast(-1.25) + tf.sqrt(cast(9.5625) - cast(3) * x + quarter_x_sq)
    spline_part = cast(0.5) * x + tf.where(x <= 4, low_piece, high_piece)
    # Inverse of the logarithmic piece for x >= 8.
    log_part = cast(3.75) + cast(0.25) * util.exp_safe(x * cast(3.6) - cast(28.8))
    return tf.where(x < 8, spline_part, log_part)
def log_base_partition_function(alpha):
  r"""Approximate the distribution's log-partition function with a 1D spline.

  Because the partition function (Z(\alpha) in the paper) of the distribution is
  difficult to model analytically, we approximate it with a (transformed) cubic
  hermite spline: Each alpha is pushed through a nonlinearity before being used
  to interpolate into a spline, which allows us to use a relatively small spline
  to accurately model the log partition function over the range of all
  non-negative input values.

  Args:
    alpha: A tensor or scalar of single or double precision floats containing
      the set of alphas for which we would like an approximate log partition
      function. Must be non-negative, as the partition function is undefined
      when alpha < 0.

  Returns:
    An approximation of log(Z(alpha)) accurate to within 1e-6
  """
  float_dtype = alpha.dtype
  # Load the values, tangents, and x-coordinate scaling of a spline that
  # approximates the partition function. This was produced by running
  # the script in fit_partition_spline.py
  # NOTE(review): the spline file is re-read on every call; if this appears in
  # profiles, consider caching the loaded arrays at module level.
  with util.get_resource_as_file(
      'robust_loss/data/partition_spline.npz') as spline_file:
    with np.load(spline_file, allow_pickle=False) as f:
      x_scale = tf.cast(f['x_scale'], float_dtype)
      values = tf.cast(f['values'], float_dtype)
      tangents = tf.cast(f['tangents'], float_dtype)
  # The partition function is undefined when `alpha` < 0.
  assert_ops = [tf.Assert(tf.reduce_all(alpha >= 0.), [alpha])]
  with tf.control_dependencies(assert_ops):
    # Transform `alpha` to the x-coordinate form expected by the spline.
    x = partition_spline_curve(alpha)
    # Interpolate into the spline.
    return cubic_spline.interpolate1d(x * x_scale, values, tangents)
def nllfun(x, alpha, scale):
  r"""Compute the negative log-likelihood -log(p(x | 0, \alpha, c)).

  This is Equation 16 of "A General and Adaptive Robust Loss Function": the
  robust loss of the residual plus the log of the distribution's partition
  function.

  Args:
    x: The residual for which the NLL is computed; any shape. `alpha` and
      `scale` are broadcast to x's shape as needed. A TF tensor or numpy
      array of floats.
    alpha: The shape parameter (\alpha in the paper). Must be non-negative;
      note the gradient of the NLL w.r.t. alpha has singularities at 0 and
      2, so gradient descent is best limited to (0, 2). Varying alpha
      interpolates smoothly between a Cauchy distribution (alpha = 0) and a
      Normal distribution (alpha = 2). A TF tensor or numpy array of floats.
    scale: The scale parameter of the loss. For |x| < scale the NLL behaves
      like that of a (possibly unnormalized) normal distribution; beyond
      that its shape is governed by alpha. A TF tensor or numpy array of
      floats.

  Returns:
    The NLL of each element of x, same shape as x, as a TF graph node of
    floats with x's precision.
  """
  # Both distribution parameters must match x's dtype.
  tf.assert_type(scale, x.dtype)
  tf.assert_type(alpha, x.dtype)
  checks = [
      tf.Assert(tf.reduce_all(scale > 0.), [scale]),   # scale must be > 0
      tf.Assert(tf.reduce_all(alpha >= 0.), [alpha]),  # alpha must be >= 0
  ]
  with tf.control_dependencies(checks):
    log_partition = tf.math.log(scale) + log_base_partition_function(alpha)
    return general.lossfun(x, alpha, scale, approximate=False) + log_partition
def draw_samples(alpha, scale):
  r"""Draw samples from the robust distribution.

  This function implements Algorithm 1 of the paper. This code is written to
  allow for sampling from a set of different distributions, each parametrized
  by its own alpha and scale values, as opposed to the more standard approach
  of drawing N samples from the same distribution. This is done by repeatedly
  performing N instances of rejection sampling for each of the N distributions
  until at least one proposal for each of the N distributions has been
  accepted. All samples are drawn with a zero mean, to use a non-zero mean
  just add each mean to each sample.

  Args:
    alpha: A TF tensor/scalar or numpy array/scalar of floats where each element
      is the shape parameter of that element's distribution.
    scale: A TF tensor/scalar or numpy array/scalar of floats where each element
      is the scale parameter of that element's distribution. Must be the same
      shape as `alpha`.

  Returns:
    A TF tensor with the same shape and precision as `alpha` and `scale` where
    each element is a sample drawn from the distribution specified for that
    element by `alpha` and `scale`.
  """
  # `scale` must have the same type as `alpha`.
  float_dtype = alpha.dtype
  tf.assert_type(scale, float_dtype)
  assert_ops = [
      # `scale` must be > 0.
      tf.Assert(tf.reduce_all(scale > 0.), [scale]),
      # `alpha` must be >= 0.
      tf.Assert(tf.reduce_all(alpha >= 0.), [alpha]),
      # `alpha` and `scale` must have the same shape.
      tf.Assert(
          tf.reduce_all(tf.equal(tf.shape(alpha), tf.shape(scale))),
          [tf.shape(alpha), tf.shape(scale)]),
  ]
  with tf.control_dependencies(assert_ops):
    shape = tf.shape(alpha)
    # The distributions we will need for rejection sampling. The sqrt(2) scaling
    # of the Cauchy distribution corrects for our differing conventions for
    # standardization.
    cauchy = tfp.distributions.Cauchy(loc=0., scale=tf.sqrt(2.))
    uniform = tfp.distributions.Uniform(low=0., high=1.)

    def while_cond(_, accepted):
      """Terminate the loop only when all samples have been accepted."""
      return ~tf.reduce_all(accepted)

    def while_body(samples, accepted):
      """Generate N proposal samples, and then perform rejection sampling."""
      # Draw N samples from a Cauchy, our proposal distribution.
      cauchy_sample = tf.cast(cauchy.sample(shape), float_dtype)
      # Compute the likelihood of each sample under its target distribution
      # (with unit scale; final scaling happens after the loop).
      nll = nllfun(cauchy_sample, alpha, tf.cast(1, float_dtype))
      # Bound the NLL. We don't use the approximate loss as it may cause
      # unpredictable behavior in the context of sampling.
      nll_bound = general.lossfun(
          cauchy_sample,
          tf.cast(0, float_dtype),
          tf.cast(1, float_dtype),
          approximate=False) + log_base_partition_function(alpha)
      # Draw N samples from a uniform distribution, and use each uniform sample
      # to decide whether or not to accept each proposal sample.
      uniform_sample = tf.cast(uniform.sample(shape), float_dtype)
      accept = uniform_sample <= tf.math.exp(nll_bound - nll)
      # If a sample is accepted, replace its element in `samples` with the
      # proposal sample, and set its bit in `accepted` to True.
      samples = tf.where(accept, cauchy_sample, samples)
      accepted = accept | accepted
      return (samples, accepted)

    # Initialize the loop. The first item does not matter as it will get
    # overwritten, the second item must be all False.
    while_loop_vars = (tf.zeros(shape, float_dtype),
                       tf.zeros(shape, dtype=bool))
    # Perform rejection sampling until all N samples have been accepted.
    terminal_state = tf.while_loop(
        cond=while_cond, body=while_body, loop_vars=while_loop_vars)
    # Because our distribution is a location-scale family, we sample from
    # p(x | 0, \alpha, 1) and then scale each sample by `scale`.
    samples = tf.multiply(terminal_state[0], scale)
    return samples
| 13,191 | 41.554839 | 80 | py |
stylegan-encoder | stylegan-encoder-master/robust_loss/wavelet.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements wavelets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# The four filters used to define a wavelet decomposition:
# (analysis, synthesis) x (highpass, lowpass)
Filters = collections.namedtuple(
    'Filters', ['analysis_lo', 'analysis_hi', 'synthesis_lo', 'synthesis_hi'])

# How we're storing the non-redundant parts of a wavelet filter bank. The
# center of the filter is at the beginning, and the rest is symmetrized.
HalfFilters = collections.namedtuple('HalfFilters', ['lo', 'hi'])


def generate_filters(wavelet_type=None):
  """Build the analysis and synthesis filter bank for a wavelet type.

  Only wavelet families whose filters all have odd length are supported.
  TODO(barron): Generalize this to even filters as well, and support Haar and
  Debauchies wavelets.

  Args:
    wavelet_type: A string key naming the wavelet family; must be one of the
      keys of `supported_half_filters` below.

  Returns:
    If `wavelet_type` is omitted, a list of the supported type strings.
    Otherwise, the `Filters` namedtuple for the requested wavelet type.
  """
  supported_half_filters = {
      # CDF 9/7 filters from "Biorthogonal bases of compactly supported
      # wavelets", Cohen et al., Commun. Pure Appl. Math 1992.
      'CDF9/7':
          HalfFilters(
              lo=np.array([
                  +0.852698679009,
                  +0.377402855613,
                  -0.110624404418,
                  -0.023849465020,
                  +0.037828455507
              ]),
              hi=np.array([
                  +0.788485616406,
                  -0.418092273222,
                  -0.040689417609,
                  +0.064538882629
              ])),
      # Le Gall 5/3 filters (sometimes called CDF 5/3 filters).
      'LeGall5/3':
          HalfFilters(
              lo=np.array([0.75, 0.25, -0.125]) * np.sqrt(2.),
              hi=np.array([1., -0.5]) / np.sqrt(2.)),
  }  # pyformat: disable

  if wavelet_type is None:
    return list(supported_half_filters.keys())
  half = supported_half_filters[wavelet_type]

  def symmetrize(f):
    # [f(n-1), ..., f(2), f(1), f(0), f(1), f(2), ..., f(n-1)].
    return np.concatenate([f[-1:0:-1], f])

  def alt_signs(n):
    # n-length vector [1, -1, 1, -1, ...].
    return (-1)**np.arange(n)

  analysis_lo = symmetrize(half.lo)
  analysis_hi = symmetrize(half.hi)
  # The synthesis filters are the analysis filters with alternating signs
  # flipped (quadrature-mirror construction).
  return Filters(
      analysis_lo=analysis_lo,
      analysis_hi=analysis_hi,
      synthesis_lo=analysis_hi * symmetrize(alt_signs(len(half.hi))),
      synthesis_hi=analysis_lo * symmetrize(alt_signs(len(half.lo))))
def pad_reflecting(x, padding_below, padding_above, axis):
  """Pads `x` with reflecting conditions above and/or below it along some axis.

  Pads `x` with reflecting conditions for `padding_below` entries below the
  tensor and `padding_above` entries above the tensor in the direction along
  `axis`. This is like using tf.pad(x, --, 'REFLECT'), except that this code
  allows for an unbounded number of reflections while tf.pad() only supports
  one reflection. Multiple reflections are necessary for for wavelet
  decompositions to guard against cases where the wavelet filters are larger
  than the input tensor along `axis`, which happens often at coarse scales.
  Note that "reflecting" boundary conditions are different from "symmetric"
  boundary conditions, in that it doesn't repeat the last element:
  reflect([A, B, C, D], 2) = [C, B, A, B, C, D, C, B]
  symmet.([A, B, C, D], 2) = [B, A, A, B, C, D, D, C]

  Args:
    x: The tensor to be padded with reflecting boundary conditions.
    padding_below: The number of elements being padded below the tensor.
    padding_above: The number of elements being padded above the tensor.
    axis: The axis in x in which padding will be performed.

  Returns:
    `x` padded according to `padding_below` and `padding_above` along `axis`
    with reflecting boundary conditions.

  Raises:
    ValueError: if `padding_below`, `padding_above`, or `axis` is not an int,
      or if `axis` is out of range for `x`'s rank.
  """
  if not isinstance(padding_below, int):
    raise ValueError(
        'Expected `padding_below` of type int, but is of type {}'.format(
            type(padding_below)))
  if not isinstance(padding_above, int):
    raise ValueError(
        'Expected `padding_above` of type int, but is of type {}'.format(
            type(padding_above)))
  if not isinstance(axis, int):
    raise ValueError('Expected `axis` of type int, but is of type {}'.format(
        type(axis)))
  if not (axis >= 0 and axis < len(x.shape)):
    raise ValueError('Expected `axis` in [0, {}], but is = {}'.format(
        len(x.shape) - 1, axis))
  # Fast path: nothing to pad.
  if padding_below == 0 and padding_above == 0:
    return tf.convert_to_tensor(x)
  n = tf.shape(x)[axis]
  # `i' contains the indices of the output padded tensor in the frame of
  # reference of the input tensor.
  i = tf.range(-padding_below, n + padding_above, dtype=tf.int32)
  # `j` contains the indices of the input tensor corresponding to the output
  # padded tensor. Folding `i` into [0, 2(n-1)) and reflecting about n-1 gives
  # an unbounded number of reflections; tf.maximum guards the n == 1 case.
  # NOTE(review): `tf.mod` is the TF 1.x spelling (tf.math.mod in TF 2);
  # this module appears to target TF 1.x throughout.
  i_mod = tf.mod(i, tf.maximum(1, 2 * (n - 1)))
  j = tf.minimum(2 * (n - 1) - i_mod, i_mod)
  return tf.gather(x, j, axis=axis)
def _check_resample_inputs(x, f, direction, shift):
"""Checks the inputs to _downsample() and _upsample()."""
if len(x.shape) != 3:
raise ValueError('Expected `x` to have rank 3, but is of size {}'.format(
x.shape))
if len(f.shape) != 1:
raise ValueError('Expected `f` to have rank 1, but is of size {}'.format(
f.shape))
if not (direction == 0 or direction == 1):
raise ValueError(
'Expected `direction` to be 0 or 1, but is {}'.format(direction))
if not (shift == 0 or shift == 1):
raise ValueError('Expected `shift` to be 0 or 1, but is {}'.format(shift))
def _downsample(x, f, direction, shift):
  """Downsample by a factor of 2 using reflecting boundary conditions.

  This function convolves `x` with filter `f` with reflecting boundary
  conditions, and then decimates by a factor of 2. This is usually done to
  downsample `x`, assuming `f` is some smoothing filter, but will also be used
  for wavelet transformations in which `f` is not a smoothing filter.

  Args:
    x: The input tensor (numpy or TF), of size (num_channels, width, height).
    f: The input filter, which must be an odd-length 1D numpy array.
    direction: The spatial direction in [0, 1] along which `x` will be convolved
      with `f` and then decimated. Because `x` has a batch/channels dimension,
      `direction` == 0 corresponds to downsampling along axis 1 in `x`, and
      `direction` == 1 corresponds to downsampling along axis 2 in `x`.
    shift: A shift amount in [0, 1] by which `x` will be shifted along the axis
      specified by `direction` before filtering.

  Returns:
    `x` convolved with `f` along the spatial dimension `direction` with
    reflection boundary conditions with an offset of `shift`.
  """
  _check_resample_inputs(x, f, direction, shift)
  # Graph-mode guard that `f` really is rank-1 at runtime.
  assert_ops = [tf.Assert(tf.equal(tf.rank(f), 1), [tf.rank(f)])]
  with tf.control_dependencies(assert_ops):
    # The above and below padding amounts are different so as to support odd
    # and even length filters. An odd-length filter of length n causes a padding
    # of (n-1)/2 on both sides, while an even-length filter will pad by one less
    # below than above.
    x_padded = pad_reflecting(x, (len(f) - 1) // 2, len(f) // 2, direction + 1)
    if direction == 0:
      # Drop `shift` leading elements along axis 1 and use a column-shaped
      # kernel; the stride of 2 on that axis performs the decimation.
      x_padded = x_padded[:, shift:, :]
      f_ex = f[:, tf.newaxis]
      strides = [1, 2, 1, 1]
    elif direction == 1:
      # Same as above, but along axis 2 with a row-shaped kernel.
      x_padded = x_padded[:, :, shift:]
      f_ex = f[tf.newaxis, :]
      strides = [1, 1, 2, 1]
    # Append a singleton channel so conv2d treats axis 0 as batch, then strip
    # that channel again after the convolution.
    y = tf.nn.conv2d(x_padded[:, :, :, tf.newaxis],
                     tf.cast(f_ex, x.dtype)[:, :, tf.newaxis, tf.newaxis],
                     strides, 'VALID')[:, :, :, 0]
  return y
def _upsample(x, up_sz, f, direction, shift):
  """Upsample by a factor of 2 using transposed reflecting boundary conditions.

  This function undecimates `x` along the axis specified by `direction` and then
  convolves it with filter `f`, thereby upsampling it to have a size of `up_sz`.
  This function is a bit awkward, as it's written to be the transpose of
  _downsample(), which uses reflecting boundary conditions. As such, this
  function approximates *the transpose of reflecting boundary conditions*, which
  is not the same as reflecting boundary conditions.
  TODO(barron): Write out the true transpose of reflecting boundary conditions.

  Args:
    x: The input tensor (numpy or TF), of size (num_channels, width, height).
    up_sz: A tuple of ints of size (upsampled_width, upsampled_height). Care
      should be taken by the caller to match the upsampled_width/height with the
      input width/height along the axis that isn't being upsampled.
    f: The input filter, which must be an odd-length 1D numpy array.
    direction: The spatial direction in [0, 1] along which `x` will be convolved
      with `f` after being undecimated. Because `x` has a batch/channels
      dimension, `direction` == 0 corresponds to downsampling along axis 1 in
      `x`, and `direction` == 1 corresponds to downsampling along axis 2 in `x`.
    shift: A shift amount in [0, 1] by which `x` will be shifted along the axis
      specified by `direction` after undecimating.

  Returns:
    `x` undecimated and convolved with `f` along the spatial dimension
    `direction` with transposed reflection boundary conditions with an offset of
    `shift`, to match size `up_sz`.
  """
  _check_resample_inputs(x, f, direction, shift)
  # Graph-mode guard that `f` really is rank-1 at runtime.
  assert_ops = [tf.Assert(tf.equal(tf.rank(f), 1), [tf.rank(f)])]
  with tf.control_dependencies(assert_ops):
    # Undecimate `x` by a factor of 2 along `direction`, by stacking it with
    # a tensor of all zeros along the right axis and then reshaping it such
    # that the zeros are interleaved. `shift` selects whether the zeros come
    # after (0) or before (1) each original element.
    if direction == 0:
      sz_ex = tf.shape(x) * [1, 2, 1]
    elif direction == 1:
      sz_ex = tf.shape(x) * [1, 1, 2]
    if shift == 0:
      x_and_zeros = [x, tf.zeros_like(x)]
    elif shift == 1:
      x_and_zeros = [tf.zeros_like(x), x]
    x_undecimated = tf.reshape(tf.stack(x_and_zeros, direction + 2), sz_ex)
    # Ensure that `x_undecimated` has a size of `up_sz`, by slicing and padding
    # as needed.
    x_undecimated = x_undecimated[:, 0:up_sz[0], 0:up_sz[1]]
    x_undecimated = tf.pad(x_undecimated,
                           [[0, 0], [0, up_sz[0] - tf.shape(x_undecimated)[1]],
                            [0, up_sz[1] - tf.shape(x_undecimated)[2]]])
    # Pad `x_undecimated` with reflection boundary conditions. Note the padding
    # amounts are the mirror image of _downsample()'s (transposed padding).
    x_padded = pad_reflecting(x_undecimated,
                              len(f) // 2, (len(f) - 1) // 2, direction + 1)
    # Convolve x_undecimated with a flipped version of f.
    f_ex = tf.expand_dims(f[::-1], 1 - direction)
    y = tf.nn.conv2d(x_padded[:, :, :, tf.newaxis],
                     tf.cast(f_ex, x.dtype)[:, :, tf.newaxis, tf.newaxis],
                     [1, 1, 1, 1], 'VALID')[:, :, :, 0]
  return y
def get_max_num_levels(sz):
  """Returns the maximum number of levels that construct() can support.

  Args:
    sz: A tuple of ints representing some input size (batch, width, height).

  Returns:
    The maximum value for num_levels, when calling construct(im, num_levels),
    assuming `sz` is the shape of `im`.
  """
  def log2(v):
    return tf.math.log(tf.cast(v, tf.float32)) / tf.math.log(2.)
  smallest_dim = tf.minimum(sz[1], sz[2])
  return tf.cast(tf.math.ceil(log2(tf.maximum(1, smallest_dim))), tf.int32)
def construct(im, num_levels, wavelet_type):
  """Constructs a wavelet decomposition of an image.

  Args:
    im: A numpy or TF tensor of single or double precision floats of size
      (batch_size, width, height)
    num_levels: The number of levels (or scales) of the wavelet decomposition to
      apply. A value of 0 returns a "wavelet decomposition" that is just the
      image.
    wavelet_type: The kind of wavelet to use, see generate_filters().

  Returns:
    A wavelet decomposition of `im` that has `num_levels` levels (not including
    the coarsest residual level) and is of type `wavelet_type`. This
    decomposition is represented as a tuple of 3-tuples, with the final element
    being a tensor:
      ((band00, band01, band02), (band10, band11, band12), ..., resid)
    Where band** and resid are TF tensors. Each element of these nested tuples
    is of shape [batch_size, width * 2^-(level+1), height * 2^-(level+1)],
    though the spatial dimensions may be off by 1 if width and height are not
    factors of 2. The residual image is of the same (rough) size as the last set
    of bands. The floating point precision of these tensors matches that of
    `im`.
  """
  if len(im.shape) != 3:
    raise ValueError(
        'Expected `im` to have a rank of 3, but is of size {}'.format(im.shape))
  if num_levels == 0:
    return (tf.convert_to_tensor(im),)
  max_num_levels = get_max_num_levels(tf.shape(im))
  # Runtime check that the image is large enough to be halved `num_levels`
  # times along its smaller spatial dimension.
  assert_ops = [
      tf.Assert(
          tf.greater_equal(max_num_levels, num_levels),
          [tf.shape(im), num_levels, max_num_levels])
  ]
  with tf.control_dependencies(assert_ops):
    filters = generate_filters(wavelet_type)
    pyr = []
    for _ in range(num_levels):
      # High- and low-pass filter along axis 1 (direction 0) first...
      hi = _downsample(im, filters.analysis_hi, 0, 1)
      lo = _downsample(im, filters.analysis_lo, 0, 0)
      # ...then along axis 2 (direction 1), yielding the three bands at this
      # level. The order of this 3-tuple must match the unpacking in
      # collapse().
      pyr.append((_downsample(hi, filters.analysis_hi, 1, 1),
                  _downsample(lo, filters.analysis_hi, 1, 1),
                  _downsample(hi, filters.analysis_lo, 1, 0)))
      # The doubly low-passed residual is the input to the next level.
      im = _downsample(lo, filters.analysis_lo, 1, 0)
    pyr.append(im)
  pyr = tuple(pyr)
  return pyr
def collapse(pyr, wavelet_type):
  """Collapses a wavelet decomposition made by construct() back into an image.

  Args:
    pyr: A numpy or TF tensor of single or double precision floats containing a
      wavelet decomposition produced by construct().
    wavelet_type: The kind of wavelet to use, see generate_filters().

  Returns:
    A TF tensor of a reconstructed image, with the same floating point precision
    as the element of `pyr`, and the same size as the image that was used to
    create `pyr`.
  """
  if not isinstance(pyr, (list, tuple)):
    raise ValueError('Expected `pyr` to be a list or tuple, but is a {}'.format(
        type(pyr)))
  filters = generate_filters(wavelet_type)
  # Start from the coarsest residual and repeatedly fold in the three bands
  # of each finer level, from coarse (d = num_levels - 1) to fine (d = 0).
  im = pyr[-1]
  num_levels = len(pyr) - 1
  for d in range(num_levels - 1, -1, -1):
    if not isinstance(pyr[d], (list, tuple)):
      raise ValueError(
          'Expected `pyr[{}]` to be a list or tuple, but is a {}'.format(
              d, type(pyr[d])))
    if len(pyr[d]) != 3:
      raise ValueError(
          'Expected `pyr[{}]` to have length 3, but has length {}'.format(
              d, len(pyr[d])))
    hi_hi, hi_lo, lo_hi = pyr[d]
    # Target sizes for the upsampling stages, reconstructed from the band
    # shapes so inputs whose sides are not powers of 2 round-trip exactly.
    up_sz = (tf.shape(hi_lo)[1] + tf.shape(lo_hi)[1],
             tf.shape(lo_hi)[2] + tf.shape(hi_lo)[2])
    lo_sz = (tf.shape(im)[1], up_sz[1])
    hi_sz = (tf.shape(hi_hi)[1], up_sz[1])
    # Upsample along axis 2 (direction 1) first, then axis 1 (direction 0) —
    # the reverse of the downsampling order in construct() — and sum the
    # low- and high-pass halves.
    im = (
        _upsample(
            _upsample(im, lo_sz, filters.synthesis_lo, 1, 0) +
            _upsample(hi_lo, lo_sz, filters.synthesis_hi, 1, 1),
            up_sz, filters.synthesis_lo, 0, 0) +
        _upsample(
            _upsample(lo_hi, hi_sz, filters.synthesis_lo, 1, 0) +
            _upsample(hi_hi, hi_sz, filters.synthesis_hi, 1, 1),
            up_sz, filters.synthesis_hi, 0, 1))  # pyformat: disable
  return im
def rescale(pyr, scale_base):
  """Rescale a wavelet decomposition `pyr` by `scale_base`^level.

  Args:
    pyr: A wavelet decomposition produced by construct().
    scale_base: The base of the exponentiation used for the per-level scaling.

  Returns:
    `pyr` where each of the three bands at level d has been multiplied by
    `scale_base`**d, and the residual by `scale_base`**(len(pyr) - 1). Level 0
    is scaled by 1 and is therefore unchanged.
  """
  num_levels = len(pyr) - 1
  scaled = []
  for level in range(num_levels):
    gain = scale_base**level
    scaled.append([pyr[level][b] * gain for b in range(3)])
  # The residual is scaled as if it were one level deeper than the last bands.
  scaled.append(pyr[num_levels] * scale_base**num_levels)
  return scaled
def flatten(pyr):
  """Flattens a wavelet decomposition into an image-like single Tensor.

  construct() produces wavelet decompositions in the form of nested tuples,
  which is convenient for TensorFlow. But Wavelets are often formatted like:
    _____________________________________
    |        |        |                 |
    | Resid  | Band11 |                 |
    |________|________|     Band01     |
    |        |        |                 |
    | Band12 | Band10 |                 |
    |________|________|_________________|
    |                 |                 |
    |                 |                 |
    |     Band02      |     Band00      |
    |                 |                 |
    |                 |                 |
    |_________________|_________________|
  This function turns our internal representation into this more-standard
  representation. This is useful for visualization and for integration into
  loss functions.

  Args:
    pyr: A pyramid-formatted wavelet decomposition produced by construct()

  Returns:
    A (num_channels, width, height) representation of pyr, as described above.
  """
  mosaic = pyr[-1]
  # Walk from the coarsest level to the finest, at each step placing the
  # running mosaic in the top-left quadrant of a 2x2 arrangement of bands.
  for bands in reversed(pyr[:-1]):
    top_row = tf.concat([mosaic, bands[1]], axis=2)
    bottom_row = tf.concat([bands[2], bands[0]], axis=2)
    mosaic = tf.concat([top_row, bottom_row], axis=1)
  return mosaic
def visualize(pyr, percentile=99.):
  """Visualizes a wavelet decomposition produced by construct().

  Args:
    pyr: A wavelet decomposition produced by construct(),
    percentile: The percentile of the deviation for each (non-residual) wavelet
      band to be clamped by before normalization. Setting this to 100 causes
      visualization to clamp to the maximum deviation, which preserves the
      entire dynamic range but may make subtle details hard to see. A value of
      99 (the default) will clip away the 1% largest-magnitude values in each
      band.

  Returns:
    An image (a TF tensor of uint8's) of shape (width, height, num_channels).
    Note that the input wavelet decomposition was produced from an image of
    shape (num_channels, width, height) --- this function permutes the ordering
    to what is expected in a planar image.
  """
  vis_pyr = []
  for d in range(len(pyr) - 1):
    vis_band = []
    for b in range(3):
      band = pyr[d][b]
      # Map each band into [0, 1] with 0 at band value 0, clipping its
      # magnitude at the requested percentile. NOTE: tfp (the `percentile` op)
      # requires tensorflow_probability.
      max_mag = tfp.stats.percentile(tf.abs(band), percentile)
      vis_band.append(0.5 * (1. + tf.clip_by_value(band / max_mag, -1., 1.)))
    vis_pyr.append(vis_band)
  # The residual, unlike the bands, is min-max normalized into [0, 1].
  d = len(pyr) - 1
  resid = pyr[d]
  resid_norm = (resid - tf.reduce_min(resid)) / (
      tf.reduce_max(resid) - tf.reduce_min(resid))
  vis_pyr.append(resid_norm)
  # Mosaic the normalized decomposition, move channels last, quantize to uint8.
  vis = tf.cast(
      tf.math.round(255. * tf.transpose(flatten(vis_pyr), [1, 2, 0])), tf.uint8)
  return vis
| 19,892 | 40.357588 | 80 | py |
stylegan-encoder | stylegan-encoder-master/robust_loss/cubic_spline.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements 1D cubic Hermite spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def interpolate1d(x, values, tangents):
  r"""Perform cubic hermite spline interpolation on a 1D spline.

  The x coordinates of the spline knots are at [0 : 1 : len(values)-1].
  Queries outside of the range of the spline are computed using linear
  extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
  for details, where "x" corresponds to `x`, "p" corresponds to `values`, and
  "m" corresponds to `tangents`.

  Args:
    x: A tensor of any size of single or double precision floats containing
      the set of values to be used for interpolation into the spline.
    values: A vector of single or double precision floats containing the value
      of each knot of the spline being interpolated into. Must be the same
      length as `tangents` and the same type as `x`.
    tangents: A vector of single or double precision floats containing the
      tangent (derivative) of each knot of the spline being interpolated into.
      Must be the same length as `values` and the same type as `x`.

  Returns:
    The result of interpolating along the spline defined by `values`, and
    `tangents`, using `x` as the query values. Will be the same length and type
    as `x`.
  """
  # `values` and `tangents` must have the same type as `x`.
  tf.assert_type(values, x.dtype)
  tf.assert_type(tangents, x.dtype)
  float_dtype = x.dtype
  assert_ops = [
      # `values` must be a vector.
      tf.Assert(tf.equal(tf.rank(values), 1), [tf.shape(values)]),
      # `tangents` must be a vector.
      tf.Assert(tf.equal(tf.rank(tangents), 1), [tf.shape(values)]),
      # `values` and `tangents` must have the same length.
      tf.Assert(
          tf.equal(tf.shape(values)[0],
                   tf.shape(tangents)[0]),
          [tf.shape(values)[0], tf.shape(tangents)[0]]),
  ]
  with tf.control_dependencies(assert_ops):
    # Find the indices of the knots below and above each x. Queries are clamped
    # to [0, n-2] so that x_hi = x_lo + 1 is always a valid knot index.
    x_lo = tf.cast(
        tf.floor(
            tf.clip_by_value(x, 0., tf.cast(
                tf.shape(values)[0] - 2, float_dtype))), tf.int32)
    x_hi = x_lo + 1
    # Compute the relative distance between each `x` and the knot below it.
    t = x - tf.cast(x_lo, float_dtype)
    # Compute the cubic hermite expansion of `t`. These are the four Hermite
    # basis functions: h00 = 2t^3 - 3t^2 + 1, h10 = t^3 - 2t^2 + t,
    # h01 = -2t^3 + 3t^2, h11 = t^3 - t^2, written to reuse shared terms.
    t_sq = tf.square(t)
    t_cu = t * t_sq
    h01 = -2. * t_cu + 3. * t_sq
    h00 = 1. - h01
    h11 = t_cu - t_sq
    h10 = h11 - t_sq + t
    # Linearly extrapolate above and below the extents of the spline for all
    # values.
    value_before = tangents[0] * t + values[0]
    value_after = tangents[-1] * (t - 1.) + values[-1]
    # Cubically interpolate between the knots below and above each query point.
    neighbor_values_lo = tf.gather(values, x_lo)
    neighbor_values_hi = tf.gather(values, x_hi)
    neighbor_tangents_lo = tf.gather(tangents, x_lo)
    neighbor_tangents_hi = tf.gather(tangents, x_hi)
    value_mid = (
        neighbor_values_lo * h00 + neighbor_values_hi * h01 +
        neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
    # Return the interpolated or extrapolated values for each query point,
    # depending on whether or not the query lies within the span of the spline.
    return tf.where(t < 0., value_before,
                    tf.where(t > 1., value_after, value_mid))
| 4,068 | 39.287129 | 79 | py |
stylegan-encoder | stylegan-encoder-master/robust_loss/util.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def log_safe(x):
  """Computes tf.math.log(x) with the input clamped to prevent NaNs."""
  clamped = tf.minimum(x, tf.cast(3e37, x.dtype))
  return tf.math.log(clamped)
def log1p_safe(x):
  """Computes tf.math.log1p(x) with the input clamped to prevent NaNs."""
  clamped = tf.minimum(x, tf.cast(3e37, x.dtype))
  return tf.math.log1p(clamped)
def exp_safe(x):
  """Computes tf.math.exp(x) with the input clamped to prevent overflow."""
  clamped = tf.minimum(x, tf.cast(87.5, x.dtype))
  return tf.math.exp(clamped)
def expm1_safe(x):
  """Computes tf.math.expm1(x) with the input clamped to prevent overflow."""
  clamped = tf.minimum(x, tf.cast(87.5, x.dtype))
  return tf.math.expm1(clamped)
def inv_softplus(y):
  """The inverse of tf.nn.softplus(), i.e. log(exp(y) - 1).

  For large `y` (> 87.5) the result is `y` itself, avoiding overflow in exp.
  """
  exact = tf.math.log(tf.math.expm1(y))
  return tf.where(y > 87.5, y, exact)
def logit(y):
  """The inverse of tf.nn.sigmoid()."""
  odds_term = 1. / y - 1.
  return -tf.math.log(odds_term)
def affine_sigmoid(real, lo=0, hi=1):
  """Squashes unconstrained reals into the open interval (lo, hi).

  A `real` of 0 maps to the midpoint (lo + hi) / 2.

  Raises:
    ValueError: If `lo` is not strictly less than `hi`.
  """
  if not lo < hi:
    raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
  return tf.sigmoid(real) * (hi - lo) + lo
def inv_affine_sigmoid(alpha, lo=0, hi=1):
  """The inverse of affine_sigmoid(., lo, hi).

  Raises:
    ValueError: If `lo` is not strictly less than `hi`.
  """
  if not lo < hi:
    raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
  normalized = (alpha - lo) / (hi - lo)
  return logit(normalized)
def affine_softplus(real, lo=0, ref=1):
  """Maps real numbers onto (lo, infinity), with 0 mapping exactly to `ref`.

  Raises:
    ValueError: If `lo` is not strictly less than `ref`.
  """
  if not lo < ref:
    raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
  # Shift so that softplus(0 + shift) == 1, making real=0 land on `ref`.
  shift = inv_softplus(tf.cast(1., real.dtype))
  scale = (ref - lo) * tf.nn.softplus(real + shift) + lo
  return scale
def inv_affine_softplus(scale, lo=0, ref=1):
  """The inverse of affine_softplus(., lo, ref).

  Raises:
    ValueError: If `lo` is not strictly less than `ref`.
  """
  if not lo < ref:
    raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
  # Undo the shift applied by affine_softplus() (softplus(shift) == 1).
  shift = inv_softplus(tf.cast(1., scale.dtype))
  real = inv_softplus((scale - lo) / (ref - lo)) - shift
  return real
def students_t_nll(x, df, scale):
  """The NLL of a Generalized Student's t-distribution (w/o including TFP).

  Args:
    x: The value at which the NLL is evaluated.
    df: The degrees-of-freedom parameter of the distribution.
    scale: The scale parameter of the distribution.

  Returns:
    The negative log-likelihood of `x` under a zero-mean Student's t.
  """
  # Accumulate the terms in the same left-to-right order as the closed form.
  nll = 0.5 * ((df + 1.) * tf.math.log1p((x / scale)**2. / df) +
               tf.math.log(df))
  nll = nll + tf.math.log(tf.abs(scale))
  nll = nll + tf.math.lgamma(0.5 * df)
  nll = nll - tf.math.lgamma(0.5 * df + 0.5)
  nll = nll + 0.5 * np.log(np.pi)
  return nll
# A constant scale that makes tf.image.rgb_to_yuv() volume preserving.
# NOTE(review): presumably precomputed so the scaled RGB->YUV matrix has a
# Jacobian determinant of 1 (see rgb_to_syuv()) — derived offline; confirm if
# the underlying tf.image.rgb_to_yuv() coefficients ever change.
_VOLUME_PRESERVING_YUV_SCALE = 1.580227820074
def rgb_to_syuv(rgb):
  """A volume preserving version of tf.image.rgb_to_yuv().

  By "volume preserving" we mean that rgb_to_syuv() is in the "special linear
  group", or equivalently, that the Jacobian determinant of the transformation
  is 1.

  Args:
    rgb: A tensor whose last dimension corresponds to RGB channels and is of
      size 3.

  Returns:
    A scaled YUV version of the input tensor, such that this transformation is
    volume-preserving.
  """
  yuv = tf.image.rgb_to_yuv(rgb)
  return _VOLUME_PRESERVING_YUV_SCALE * yuv
def syuv_to_rgb(yuv):
  """A volume preserving version of tf.image.yuv_to_rgb().

  By "volume preserving" we mean that rgb_to_syuv() is in the "special linear
  group", or equivalently, that the Jacobian determinant of the transformation
  is 1.

  Args:
    yuv: A tensor whose last dimension corresponds to scaled YUV channels and is
      of size 3 (ie, the output of rgb_to_syuv()).

  Returns:
    An RGB version of the input tensor, such that this transformation is
    volume-preserving.
  """
  unscaled = yuv / _VOLUME_PRESERVING_YUV_SCALE
  return tf.image.yuv_to_rgb(unscaled)
def image_dct(image):
  """Does a type-II DCT (aka "The DCT") on axes 1 and 2 of a rank-3 tensor."""
  # DCT operates on the last axis, so transpose between the two passes to
  # cover both spatial axes.
  swap_last_two = lambda t: tf.transpose(t, [0, 2, 1])
  dct_y = swap_last_two(tf.spectral.dct(image, type=2, norm='ortho'))
  dct_x = swap_last_two(tf.spectral.dct(dct_y, type=2, norm='ortho'))
  return dct_x
def image_idct(dct_x):
  """Inverts image_dct(), by performing a type-III DCT."""
  # Mirror image_dct(): transpose before each pass so the IDCT covers both
  # spatial axes.
  swap_last_two = lambda t: tf.transpose(t, [0, 2, 1])
  dct_y = tf.spectral.idct(swap_last_two(dct_x), type=2, norm='ortho')
  return tf.spectral.idct(swap_last_two(dct_y), type=2, norm='ortho')
def compute_jacobian(f, x):
  """Computes the Jacobian of function `f` with respect to input `x`.

  Uses TF1 graph mode (tf.placeholder + tf.Session), building one
  tf.gradients() call per output element, so this is only practical for small
  inputs. Note that `d` ranges over x.size, which assumes f(x) has the same
  number of elements as `x` (a square Jacobian).

  Returns:
    A numpy array J where J[i, d] is the derivative of the d-th element of
    flattened f(x) with respect to the i-th element of flattened x.
  """
  x_ph = tf.placeholder(tf.float32, x.shape)
  vec = lambda x: tf.reshape(x, [-1])
  jacobian = tf.stack(
      [vec(tf.gradients(vec(f(x_ph))[d], x_ph)[0]) for d in range(x.size)], 1)
  with tf.Session() as sess:
    jacobian = sess.run(jacobian, {x_ph: x})
  return jacobian
def get_resource_as_file(path):
  """A uniform interface for internal/open-source files.

  Returns a context manager that simply yields the relative filename
  './' + path (no file is opened).
  """

  class _PassthroughContext(object):
    """Context manager that hands back the value it was constructed with."""

    def __init__(self, resource=None):
      self._resource = resource

    def __enter__(self):
      return self._resource

    def __exit__(self, *unused_args):
      pass

  return _PassthroughContext('./' + path)
def get_resource_filename(path):
  """A uniform interface for internal/open-source filenames."""
  prefix = './'
  return prefix + path
| 5,688 | 29.918478 | 80 | py |
stylegan-encoder | stylegan-encoder-master/robust_loss/general.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the general form of the loss.
This is the simplest way of using this loss. No parameters will be tuned
automatically, it's just a simple function that takes in parameters (likely
hand-tuned ones) and return a loss. For an adaptive loss, look at adaptive.py
or distribution.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from robust_loss import util
def lossfun(x, alpha, scale, approximate=False, epsilon=1e-6):
  r"""Implements the general form of the loss.

  This implements the rho(x, \alpha, c) function described in "A General and
  Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.

  Args:
    x: The residual for which the loss is being computed. x can have any shape,
      and alpha and scale will be broadcasted to match x's shape if necessary.
      Must be a tensorflow tensor or numpy array of floats.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Must be a tensorflow tensor or
      numpy array of floats with the same precision as `x`. Varying alpha allows
      for smooth interpolation between a number of discrete robust losses:
        alpha=-Infinity: Welsch/Leclerc Loss.
        alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lorentzian loss.
        alpha=1: Charbonnier/pseudo-Huber loss.
        alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha. Must be a tensorflow tensor or numpy
      array of single-precision floats.
    approximate: a bool, where if True, this function returns an approximate and
      faster form of the loss, as described in the appendix of the paper. This
      approximation holds well everywhere except as x and alpha approach zero.
    epsilon: A float that determines how inaccurate the "approximate" version of
      the loss will be. Larger values are less accurate but more numerically
      stable. Must be greater than single-precision machine epsilon.

  Returns:
    The losses for each element of x, in the same shape as x. This is returned
    as a TensorFlow graph node of single precision floats.
  """
  # `scale` and `alpha` must have the same type as `x`.
  tf.assert_type(scale, x.dtype)
  tf.assert_type(alpha, x.dtype)
  float_dtype = x.dtype
  # `scale` must be > 0.
  assert_ops = [tf.Assert(tf.reduce_all(tf.greater(scale, 0.)), [scale])]
  with tf.control_dependencies(assert_ops):
    # Broadcast `alpha` and `scale` to have the same shape as `x`.
    alpha = tf.broadcast_to(alpha, tf.shape(x))
    scale = tf.broadcast_to(scale, tf.shape(x))
    if approximate:
      # `epsilon` must be greater than single-precision machine epsilon.
      assert epsilon > np.finfo(np.float32).eps
      # Compute an approximate form of the loss which is faster, but inaccurate
      # when x and alpha are near zero.
      b = tf.abs(alpha - tf.cast(2., float_dtype)) + epsilon
      d = tf.where(
          tf.greater_equal(alpha, 0.), alpha + epsilon, alpha - epsilon)
      loss = (b / d) * (tf.pow(tf.square(x / scale) / b + 1., 0.5 * d) - 1.)
    else:
      # Compute the exact loss.
      # This will be used repeatedly.
      squared_scaled_x = tf.square(x / scale)
      # The loss when alpha == 2.
      loss_two = 0.5 * squared_scaled_x
      # The loss when alpha == 0.
      loss_zero = util.log1p_safe(0.5 * squared_scaled_x)
      # The loss when alpha == -infinity.
      loss_neginf = -tf.math.expm1(-0.5 * squared_scaled_x)
      # The loss when alpha == +infinity.
      loss_posinf = util.expm1_safe(0.5 * squared_scaled_x)
      # The loss when not in one of the above special cases.
      machine_epsilon = tf.cast(np.finfo(np.float32).eps, float_dtype)
      # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
      beta_safe = tf.maximum(machine_epsilon, tf.abs(alpha - 2.))
      # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
      alpha_safe = tf.where(
          tf.greater_equal(alpha, 0.), tf.ones_like(alpha),
          -tf.ones_like(alpha)) * tf.maximum(machine_epsilon, tf.abs(alpha))
      loss_otherwise = (beta_safe / alpha_safe) * (
          tf.pow(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)
      # Select which of the cases of the loss to return, handling each special
      # value of alpha (-inf, 0, 2, +inf) explicitly rather than relying on the
      # clamped general expression.
      loss = tf.where(
          tf.equal(alpha, -tf.cast(float('inf'), float_dtype)), loss_neginf,
          tf.where(
              tf.equal(alpha, 0.), loss_zero,
              tf.where(
                  tf.equal(alpha, 2.), loss_two,
                  tf.where(
                      tf.equal(alpha, tf.cast(float('inf'), float_dtype)),
                      loss_posinf, loss_otherwise))))
  return loss
| 5,863 | 44.107692 | 80 | py |
stylegan-encoder | stylegan-encoder-master/training/networks_stylegan.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Network architectures used in the StyleGAN paper."""
import math
import config
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Primitive ops for manipulating 4D activation tensors.
# The gradients of these are not necessary efficient or even meaningful.
def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1):
    """Blurs an NCHW tensor `x` with the filter `f` via a depthwise conv.

    Args:
        x: 4D tensor [batch, channels, height, width]; the non-batch
            dimensions must be statically known.
        f: Filter taps. A 1D sequence is expanded to a 2D separable kernel by
            its outer product; a 2D array is used as-is.
        normalize: If True, scale the kernel so its taps sum to 1.
        flip: If True, flip the kernel in both spatial dimensions (used by
            blur2d()'s custom gradient).
        stride: Integer decimation factor applied by the convolution.

    Returns:
        The filtered tensor, cast back to the dtype of `x`.
    """
    assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
    assert isinstance(stride, int) and stride >= 1

    # Finalize filter kernel.
    f = np.array(f, dtype=np.float32)
    if f.ndim == 1:
        f = f[:, np.newaxis] * f[np.newaxis, :]
    assert f.ndim == 2
    if normalize:
        f /= np.sum(f)
    if flip:
        f = f[::-1, ::-1]

    # No-op => early exit. BUGFIX: this check previously ran *after* the
    # kernel was expanded to 4D and tiled, so `f.shape == (1, 1)` could never
    # be true and the early exit was dead code. It must also require
    # stride == 1, since a 1x1 identity kernel with stride > 1 still decimates.
    if f.shape == (1, 1) and f[0,0] == 1 and stride == 1:
        return x

    # Expand the 2D kernel to depthwise_conv2d's [H, W, in_channels, 1] layout.
    f = f[:, :, np.newaxis, np.newaxis]
    f = np.tile(f, [1, 1, int(x.shape[1]), 1])

    # Convolve using depthwise_conv2d.
    orig_dtype = x.dtype
    x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16
    f = tf.constant(f, dtype=x.dtype, name='filter')
    strides = [1, 1, stride, stride]
    x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW')
    x = tf.cast(x, orig_dtype)
    return x
def _upscale2d(x, factor=2, gain=1):
    """Nearest-neighbor upsampling of an NCHW tensor by an integer factor."""
    assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
    assert isinstance(factor, int) and factor >= 1

    # Apply the optional gain first.
    if gain != 1:
        x *= gain

    # Trivial factor: nothing else to do.
    if factor == 1:
        return x

    # Replicate each pixel factor x factor times via reshape + tile + reshape.
    shape = x.shape
    expanded = tf.reshape(x, [-1, shape[1], shape[2], 1, shape[3], 1])
    tiled = tf.tile(expanded, [1, 1, 1, factor, 1, factor])
    return tf.reshape(tiled, [-1, shape[1], shape[2] * factor, shape[3] * factor])
def _downscale2d(x, factor=2, gain=1):
    """Box-filter downsampling of an NCHW tensor by an integer factor."""
    assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
    assert isinstance(factor, int) and factor >= 1

    # Common 2x float32 case: fold the gain into a strided blur.
    if factor == 2 and x.dtype == tf.float32:
        taps = [np.sqrt(gain) / factor] * factor
        return _blur2d(x, f=taps, normalize=False, stride=factor)

    # Apply the optional gain.
    if gain != 1:
        x *= gain

    # Trivial factor: nothing else to do.
    if factor == 1:
        return x

    # General case: average pooling.
    # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work.
    pool_shape = [1, 1, factor, factor]
    return tf.nn.avg_pool(x, ksize=pool_shape, strides=pool_shape, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# High-level ops for manipulating 4D activation tensors.
# The gradients of these are meant to be as efficient as possible.
def blur2d(x, f=[1,2,1], normalize=True):
    # Differentiable wrapper around _blur2d(). The custom gradient blurs the
    # incoming gradient with the flipped filter, and that gradient's own
    # gradient is a plain (unflipped) blur again.
    with tf.variable_scope('Blur2D'):
        @tf.custom_gradient
        def func(x):
            y = _blur2d(x, f, normalize)
            @tf.custom_gradient
            def grad(dy):
                dx = _blur2d(dy, f, normalize, flip=True)
                return dx, lambda ddx: _blur2d(ddx, f, normalize)
            return y, grad
        return func(x)
def upscale2d(x, factor=2):
    # Differentiable wrapper around _upscale2d(). The custom gradient is a
    # downscale with gain factor**2, and the second-order gradient is an
    # upscale again.
    with tf.variable_scope('Upscale2D'):
        @tf.custom_gradient
        def func(x):
            y = _upscale2d(x, factor)
            @tf.custom_gradient
            def grad(dy):
                dx = _downscale2d(dy, factor, gain=factor**2)
                return dx, lambda ddx: _upscale2d(ddx, factor)
            return y, grad
        return func(x)
def downscale2d(x, factor=2):
    # Differentiable wrapper around _downscale2d(). The custom gradient is an
    # upscale with gain 1/factor**2, and the second-order gradient is a
    # downscale again.
    with tf.variable_scope('Downscale2D'):
        @tf.custom_gradient
        def func(x):
            y = _downscale2d(x, factor)
            @tf.custom_gradient
            def grad(dy):
                dx = _upscale2d(dy, factor, gain=1/factor**2)
                return dx, lambda ddx: _downscale2d(ddx, factor)
            return y, grad
        return func(x)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1, suffix=''):
    """Creates a weight variable with He initialization.

    When `use_wscale` is set, the variable is created with unit-ish std and
    the He scaling is applied at runtime instead (equalized learning rate);
    `lrmul` is a per-layer learning-rate multiplier folded into the init/runtime
    scaling. `suffix` is appended to the variable name 'weight'.
    """
    fan_in = np.prod(shape[:-1])  # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
    he_std = gain / np.sqrt(fan_in)  # He init

    # Equalized learning rate and custom learning rate multiplier.
    if use_wscale:
        init_std = 1.0 / lrmul
        runtime_coef = he_std * lrmul
    else:
        init_std = he_std / lrmul
        runtime_coef = lrmul

    # Create the variable and apply the runtime coefficient.
    initializer = tf.initializers.random_normal(0, init_std)
    var = tf.get_variable('weight' + suffix, shape=shape, initializer=initializer)
    return var * runtime_coef
#----------------------------------------------------------------------------
# Fully-connected layer - replace with TreeConnect where appropriate
if hasattr(config, 'use_treeconnect') and config.use_treeconnect:
    # Layers whose (input + output) size is at or below this threshold keep
    # the plain dense implementation; larger layers use TreeConnect.
    if (hasattr(config, 'treeconnect_threshold')):
        treeconnect_threshold = config.treeconnect_threshold
    else:
        treeconnect_threshold = 1024
    def is_square(n):
        # True iff `n` is a perfect square (round sqrt to the nearest int
        # before squaring back).
        return (n == int(math.sqrt(n) + 0.5)**2)
    def conv(inp,
             k_h,
             k_w,
             c_o,
             suffix = '',
             **kwargs):
        # Same-padded NCHW convolution with a k_h x k_w kernel and c_o output
        # channels. `suffix` disambiguates weight variable names when one
        # scope creates several kernels.
        # Get the number of channels in the input
        c_i = int(inp.get_shape()[1])
        # Convolution for a given input and kernel
        kernel = get_weight([k_h, k_w, c_i, c_o], suffix=suffix, **kwargs)
        kernel = tf.cast(kernel, inp.dtype)
        return tf.nn.conv2d(inp, kernel, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
    def real_dense(x, fmaps, **kwargs):
        # The ordinary fully-connected layer: flatten non-batch dims, then
        # matmul with a learned [in, fmaps] weight matrix.
        if len(x.shape) > 2:
            x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
        w = get_weight([x.shape[1].value, fmaps], **kwargs)
        w = tf.cast(w, x.dtype)
        return tf.matmul(x, w)
    # replace dense layer with TreeConnect where possible - see https://github.com/OliverRichter/TreeConnect
    def dense(x, fmaps, **kwargs):
        if len(x.shape) > 2:
            x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
        layer_size = x.get_shape().as_list()
        layer_size = layer_size[1]
        # `fmaps` may arrive as a TF Dimension; normalize it to a Python int.
        if "tensorflow" in str(type(fmaps)):
            fm = fmaps.value
        else:
            fm = int(fmaps)
        if layer_size + fm <= treeconnect_threshold: # option to only replace the larger dense layers
            return real_dense(x, fmaps, **kwargs)
        # Factor the flat input of length layer_size into a layer_l x layer_r
        # grid (layer_l is forced to a power of 2 when layer_size is not a
        # perfect square).
        if is_square(layer_size): # work out layer dimensions
            layer_l = int(math.sqrt(layer_size)+0.5)
            layer_r = layer_l
        else:
            layer_m = math.log(math.sqrt(layer_size),2)
            layer_l = 2**math.ceil(layer_m)
            layer_r = layer_size // layer_l
        # Split the in/out size ratio between output channels (fm) and a
        # reduction factor (rf) applied between the two convolutions.
        if fm >= layer_size: # adjust channels for output size
            fm = fm//layer_size
            rf = 1
        else:
            rf = layer_size//fm
            fm = 1
        if rf > layer_l: # fall back to dense layer
            return real_dense(x, fmaps, **kwargs)
        # TreeConnect body: two orthogonal 1D convolutions (with a transpose
        # in between) over the reshaped grid stand in for the dense matmul.
        x = tf.reshape(x, [tf.shape(x)[0], 1, layer_l, layer_r])
        w = conv(x, layer_r, 1, 1, **kwargs)
        w = tf.transpose(w, perm=[0,1,3,2])
        if rf > 1: # reshape to use channels
            w = tf.reshape(w, [tf.shape(x)[0], rf, layer_l // rf, layer_r])
        w = conv(w, layer_l, 1, fm, suffix='_1', **kwargs) # add suffix to weights
        w = tf.reshape(w, [tf.shape(x)[0], np.prod([d.value for d in w.shape[1:]])])
        return w
else:
    def dense(x, fmaps, **kwargs):
        # Standard fully-connected layer: flatten non-batch dims, then matmul
        # with a learned [in, fmaps] weight matrix.
        if len(x.shape) > 2:
            x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
        w = get_weight([x.shape[1].value, fmaps], **kwargs)
        w = tf.cast(w, x.dtype)
        return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, **kwargs):
    """Stride-1 SAME-padded convolution in NCHW layout."""
    assert kernel >= 1 and kernel % 2 == 1  # odd kernel preserves spatial size
    weight = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
    return tf.nn.conv2d(x, tf.cast(weight, x.dtype), strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused convolution + scaling.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
    """2x upscale followed by a conv, optionally fused into one op.

    The fused path expresses upscale+conv as a single transposed convolution,
    which is faster and uses less memory than the separate ops.
    """
    assert kernel >= 1 and kernel % 2 == 1
    assert fused_scale in [True, False, 'auto']
    if fused_scale == 'auto':
        fused_scale = min(x.shape[2:]) * 2 >= 128  # fuse only at high resolution
    if not fused_scale:
        # Separate ops: plain upscale then conv.
        return conv2d(upscale2d(x), fmaps, kernel, **kwargs)
    # Fused: pad the kernel and sum four shifted copies, which folds
    # nearest-neighbor upsampling into the transposed-conv kernel.
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
    w = tf.transpose(w, [0, 1, 3, 2])  # -> [kernel, kernel, fmaps_out, fmaps_in]
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
    out_shape = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
    return tf.nn.conv2d_transpose(x, tf.cast(w, x.dtype), out_shape, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
    """Conv followed by a 2x box-filter downscale, optionally fused.

    The fused path smears the conv kernel with a 2x2 box filter and applies
    it with stride 2, which equals conv2d followed by 2x average pooling.
    """
    assert kernel >= 1 and kernel % 2 == 1
    assert fused_scale in [True, False, 'auto']
    if fused_scale == 'auto':
        fused_scale = min(x.shape[2:]) >= 128  # fuse only at high resolution
    if not fused_scale:
        # Separate ops: conv first, then the box-filter downscale.
        return downscale2d(conv2d(x, fmaps, kernel, **kwargs))
    # Fused: 2x2 box filter (x0.25) folded into the kernel, stride-2 conv.
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
    return tf.nn.conv2d(x, tf.cast(w, x.dtype), strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x, lrmul=1):
    """Add a learned per-channel bias (channel = axis 1 for 4D tensors)."""
    bias = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
    bias = tf.cast(bias, x.dtype)
    if len(x.shape) == 2:
        return x + bias
    # 4D activations: broadcast the bias over the spatial dimensions.
    return x + tf.reshape(bias, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16.
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU with a hand-written (second-order) gradient; FP16-safe."""
    with tf.variable_scope('LeakyReLU'):
        alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
        @tf.custom_gradient
        def _lrelu(v):
            out = tf.maximum(v, v * alpha)
            @tf.custom_gradient
            def _grad(dy):
                # Gradient passes through for positives, scaled by alpha
                # otherwise; the lambda supplies the gradient-of-gradient.
                dv = tf.where(out >= 0, dy, dy * alpha)
                return dv, lambda ddx: tf.where(out >= 0, ddx, ddx * alpha)
            return out, _grad
        return _lrelu(x)
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
    """Normalize each pixel's feature vector to unit mean-square (axis 1)."""
    with tf.variable_scope('PixelNorm'):
        eps = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
        mean_sq = tf.reduce_mean(tf.square(x), axis=1, keepdims=True)
        return x * tf.rsqrt(mean_sq + eps)
#----------------------------------------------------------------------------
# Instance normalization.
def instance_norm(x, epsilon=1e-8):
    """Instance normalization over the spatial dims; math done in FP32."""
    assert len(x.shape) == 4  # NCHW
    with tf.variable_scope('InstanceNorm'):
        orig_dtype = x.dtype
        y = tf.cast(x, tf.float32)
        y -= tf.reduce_mean(y, axis=[2,3], keepdims=True)
        eps = tf.constant(epsilon, dtype=y.dtype, name='epsilon')
        y *= tf.rsqrt(tf.reduce_mean(tf.square(y), axis=[2,3], keepdims=True) + eps)
        return tf.cast(y, orig_dtype)
#----------------------------------------------------------------------------
# Style modulation.
def style_mod(x, dlatent, **kwargs):
    """Per-channel scale-and-shift of x driven by a style from dlatent."""
    with tf.variable_scope('StyleMod'):
        style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs))
        # Split into (scale, bias) pairs broadcastable over the spatial dims.
        style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2))
        return x * (style[:, 0] + 1) + style[:, 1]
#----------------------------------------------------------------------------
# Noise input.
def apply_noise(x, noise_var=None, randomize_noise=True):
    """Add single-channel noise to x (NCHW), weighted per channel."""
    assert len(x.shape) == 4
    with tf.variable_scope('Noise'):
        if noise_var is None or randomize_noise:
            # Fresh noise every evaluation (non-deterministic).
            noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
        else:
            # Deterministic noise read from the supplied variable.
            noise = tf.cast(noise_var, x.dtype)
        weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros())
        scaled = noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1])
        return x + scaled
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
    """Append per-group minibatch stddev statistics as extra feature maps (ProGAN trick)."""
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]])   # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
        y = tf.cast(y, tf.float32)                              # [GMncHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keepdims=True)           # [GMncHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MncHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MncHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True)      # [Mn111]  Take average over fmaps and pixels.
        y = tf.reduce_mean(y, axis=[2])                         # [Mn11] Split channels into c channel groups
        y = tf.cast(y, x.dtype)                                 # [Mn11]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [NnHW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.
#----------------------------------------------------------------------------
# Style-based generator used in the StyleGAN paper.
# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below.
def G_style(
    latents_in,                                     # First input: Latent vectors (Z) [minibatch, latent_size].
    labels_in,                                      # Second input: Conditioning labels [minibatch, label_size].
    truncation_psi          = 0.7,                  # Style strength multiplier for the truncation trick. None = disable.
    truncation_cutoff       = 8,                    # Number of layers for which to apply the truncation trick. None = disable.
    truncation_psi_val      = None,                 # Value for truncation_psi to use during validation.
    truncation_cutoff_val   = None,                 # Value for truncation_cutoff to use during validation.
    dlatent_avg_beta        = 0.995,                # Decay for tracking the moving average of W during training. None = disable.
    style_mixing_prob       = 0.9,                  # Probability of mixing styles during training. None = disable.
    is_training             = False,                # Network is under training? Enables and disables specific features.
    is_validation           = False,                # Network is under validation? Chooses which value to use for truncation_psi.
    is_template_graph       = False,                # True = template graph constructed by the Network class, False = actual evaluation.
    components              = dnnlib.EasyDict(),    # Container for sub-networks. Retained between calls (deliberate mutable default).
    **kwargs):                                      # Arguments for sub-networks (G_mapping and G_synthesis).
    """Style-based generator: (Z, labels) -> images via G_mapping + G_synthesis,
    with moving-average W tracking, style mixing, and the truncation trick."""
    # Validate arguments.
    assert not is_training or not is_validation
    assert isinstance(components, dnnlib.EasyDict)
    if is_validation:
        truncation_psi = truncation_psi_val
        truncation_cutoff = truncation_cutoff_val
    # Disable features whose parameter values make them a no-op for the
    # current mode (TF expressions are left alone — value unknown here).
    if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
        truncation_psi = None
    if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0):
        truncation_cutoff = None
    if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
        dlatent_avg_beta = None
    if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
        style_mixing_prob = None
    # Setup components.
    if 'synthesis' not in components:
        components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs)
    num_layers = components.synthesis.input_shape[1]
    dlatent_size = components.synthesis.input_shape[2]
    if 'mapping' not in components:
        components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs)
    # Setup variables.
    lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
    dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
    # Evaluate mapping network.
    dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs)
    # Remember the mapping output so its dtype can be restored after the
    # float32 averaging/mixing/truncation math below.
    dlorig = dlatents
    dlatents = tf.cast(dlatents, dtype=np.float32)
    # Update moving average of W.
    if dlatent_avg_beta is not None:
        with tf.variable_scope('DlatentAvg'):
            batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
            update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
            with tf.control_dependencies([update_op]):
                dlatents = tf.identity(dlatents)
    # Perform style mixing regularization.
    if style_mixing_prob is not None:
        with tf.name_scope('StyleMix'):
            latents2 = tf.random_normal(tf.shape(latents_in))
            dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs)
            dlatents2 = tf.cast(dlatents2, dlatents.dtype)
            layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
            cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
            # With probability style_mixing_prob, pick a random crossover
            # layer; layers past it take styles from the second latent.
            mixing_cutoff = tf.cond(
                tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
                lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
                lambda: cur_layers)
            dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
    # Apply truncation trick.
    if truncation_psi is not None and truncation_cutoff is not None:
        with tf.variable_scope('Truncation'):
            layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
            ones = np.ones(layer_idx.shape, dtype=np.float32)
            # Shrink W toward its moving average on the first truncation_cutoff layers.
            coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
            dlatents = tflib.lerp(dlatent_avg, dlatents, coefs)
    dlatents = tf.cast(dlatents, dtype=dlorig.dtype)
    # Evaluate synthesis network.
    with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]):
        images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs)
    return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# Mapping network used in the StyleGAN paper.
def G_mapping(
    latents_in,                             # First input: Latent vectors (Z) [minibatch, latent_size].
    labels_in,                              # Second input: Conditioning labels [minibatch, label_size].
    latent_size             = 512,          # Latent vector (Z) dimensionality.
    label_size              = 0,            # Label dimensionality, 0 if no labels.
    dlatent_size            = 512,          # Disentangled latent (W) dimensionality.
    dlatent_broadcast       = None,         # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
    mapping_layers          = 8,            # Number of mapping layers.
    mapping_fmaps           = 512,          # Number of activations in the mapping layers.
    mapping_lrmul           = 0.01,         # Learning rate multiplier for the mapping layers.
    mapping_nonlinearity    = 'lrelu',      # Activation function: 'relu', 'lrelu'.
    use_wscale              = True,         # Enable equalized learning rate?
    normalize_latents       = True,         # Normalize latent vectors (Z) before feeding them to the mapping layers?
    epsilon                 = 1e-8,         # Constant epsilon for pixelwise feature vector normalization.
    dtype                   = 'float32',    # Data type to use for activations and outputs.
    **_kwargs):                             # Ignore unrecognized keyword args.
    """Mapping network: transform latents Z (plus optional label embedding) into dlatents W."""
    def PN(x): return pixel_norm(x, epsilon=epsilon) if normalize_latents else x
    act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity]
    # Inputs.
    latents_in.set_shape([None, latent_size])
    labels_in.set_shape([None, label_size])
    latents_in = tf.cast(latents_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    x = latents_in
    # Embed labels and concatenate them with latents.
    if label_size:
        with tf.variable_scope('LabelConcat'):
            w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
            y = tf.matmul(labels_in, tf.cast(w, dtype))
            x = tf.concat([x, y], axis=1)
    # Normalize latents.
    x = PN(x)
    # Mapping layers.
    for layer_idx in range(mapping_layers):
        with tf.variable_scope('Dense%d' % layer_idx):
            # Final layer projects to dlatent_size; earlier layers use mapping_fmaps.
            fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
            x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul)
            x = apply_bias(x, lrmul=mapping_lrmul)
            x = act(x)
    # Broadcast.
    if dlatent_broadcast is not None:
        # Replicate W once per synthesis layer: [minibatch, dlatent_broadcast, dlatent_size].
        with tf.variable_scope('Broadcast'):
            x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
    # Output.
    assert x.dtype == tf.as_dtype(dtype)
    return tf.identity(x, name='dlatents_out')
#----------------------------------------------------------------------------
# Synthesis network used in the StyleGAN paper.
def G_synthesis(
    dlatents_in,                        # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
    dlatent_size        = 512,          # Disentangled latent (W) dimensionality.
    num_channels        = 3,            # Number of output color channels.
    resolution          = 1024,         # Output resolution.
    fmap_base           = 8192,         # Overall multiplier for the number of feature maps.
    fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
    fmap_max            = 512,          # Maximum number of feature maps in any layer.
    use_styles          = True,         # Enable style inputs?
    const_input_layer   = True,         # First layer is a learned constant?
    use_noise           = True,         # Enable noise inputs?
    randomize_noise     = True,         # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
    nonlinearity        = 'lrelu',      # Activation function: 'relu', 'lrelu'
    use_wscale          = True,         # Enable equalized learning rate?
    use_pixel_norm      = False,        # Enable pixelwise feature vector normalization?
    epsilon             = 1e-8,         # Constant epsilon for pixelwise feature vector normalization.
    use_instance_norm   = True,         # Enable instance normalization?
    dtype               = 'float32',    # Data type to use for activations and outputs.
    fused_scale         = 'auto',       # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
    blur_filter         = [1,2,1],      # Low-pass filter to apply when resampling activations. None = no filtering.
    structure           = 'auto',       # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
    force_clean_graph   = False,        # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
    **_kwargs):                         # Ignore unrecognized keyword args.
    """Synthesis network: grows an image from a 4x4 base up to `resolution`,
    modulating each layer with the corresponding slice of W."""
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4
    # Helpers: feature-map count per stage, optional norms, optional blur.
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    def PN(x): return pixel_norm(x, epsilon=epsilon) if use_pixel_norm else x
    def IN(x): return instance_norm(x, epsilon=epsilon) if use_instance_norm else x
    def blur(x): return blur2d(x, blur_filter) if blur_filter else x
    if is_template_graph: force_clean_graph = True
    if force_clean_graph: randomize_noise = False
    if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
    act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
    num_layers = resolution_log2 * 2 - 2  # two conv layers per doubling of resolution
    num_styles = num_layers if use_styles else 1
    images_out = None
    # Primary inputs.
    dlatents_in.set_shape([None, num_styles, dlatent_size])
    dlatents_in = tf.cast(dlatents_in, dtype)
    lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
    # Noise inputs.
    noise_inputs = []
    if use_noise:
        for layer_idx in range(num_layers):
            res = layer_idx // 2 + 2
            shape = [1, use_noise, 2**res, 2**res]  # NOTE(review): use_noise is True (== 1) here, so this is a 1-channel noise map; a literal 1 would be clearer.
            noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
    # Things to do at the end of each layer.
    def layer_epilogue(x, layer_idx):
        # Order: noise -> bias -> activation -> (pixel norm) -> (instance norm) -> style.
        if use_noise:
            x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise)
        x = apply_bias(x)
        x = act(x)
        x = PN(x)
        x = IN(x)
        if use_styles:
            x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale)
        return x
    # Early layers.
    with tf.variable_scope('4x4'):
        if const_input_layer:
            with tf.variable_scope('Const'):
                x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones())
                x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0)
        else:
            with tf.variable_scope('Dense'):
                x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN
                x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0)
        with tf.variable_scope('Conv'):
            x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1)
    # Building blocks for remaining layers.
    def block(res, x): # res = 3..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            with tf.variable_scope('Conv0_up'):
                x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4)
            with tf.variable_scope('Conv1'):
                x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3)
            return x
    def torgb(res, x): # res = 2..resolution_log2
        lod = resolution_log2 - res
        with tf.variable_scope('ToRGB_lod%d' % lod):
            return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
    # Fixed structure: simple and efficient, but does not support progressive growing.
    if structure == 'fixed':
        for res in range(3, resolution_log2 + 1):
            x = block(res, x)
        images_out = torgb(resolution_log2, x)
    # Linear structure: simple but inefficient.
    if structure == 'linear':
        images_out = torgb(2, x)
        for res in range(3, resolution_log2 + 1):
            lod = resolution_log2 - res
            x = block(res, x)
            img = torgb(res, x)
            images_out = upscale2d(images_out)
            with tf.variable_scope('Grow_lod%d' % lod):
                # Cross-fade between the previous resolution and the new one.
                images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
    # Recursive structure: complex but efficient.
    if structure == 'recursive':
        def cset(cur_lambda, new_cond, new_lambda):
            return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
        def grow(x, res, lod):
            y = block(res, x)
            img = lambda: upscale2d(torgb(res, y), 2**lod)
            img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod))
            if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
            return img()
        images_out = grow(x, 3, resolution_log2 - 3)
    assert images_out.dtype == tf.as_dtype(dtype)
    return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# Discriminator used in the StyleGAN paper.
def D_basic(
    images_in,                          # First input: Images [minibatch, channel, height, width].
    labels_in,                          # Second input: Labels [minibatch, label_size].
    num_channels        = 1,            # Number of input color channels. Overridden based on dataset.
    resolution          = 32,           # Input resolution. Overridden based on dataset.
    label_size          = 0,            # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
    fmap_base           = 8192,         # Overall multiplier for the number of feature maps.
    fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
    fmap_max            = 512,          # Maximum number of feature maps in any layer.
    nonlinearity        = 'lrelu',      # Activation function: 'relu', 'lrelu',
    use_wscale          = True,         # Enable equalized learning rate?
    mbstd_group_size    = 4,            # Group size for the minibatch standard deviation layer, 0 = disable.
    mbstd_num_features  = 1,            # Number of features for the minibatch standard deviation layer.
    dtype               = 'float32',    # Data type to use for activations and outputs.
    fused_scale         = 'auto',       # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
    blur_filter         = [1,2,1],      # Low-pass filter to apply when resampling activations. None = no filtering.
    structure           = 'auto',       # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
    **_kwargs):                         # Ignore unrecognized keyword args.
    """Discriminator: scores images (optionally label-conditioned), shrinking
    from full resolution down to 4x4, with progressive-growing support."""
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4
    # Helpers: feature-map count per stage and optional low-pass blur.
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    def blur(x): return blur2d(x, blur_filter) if blur_filter else x
    if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
    act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
    images_in.set_shape([None, num_channels, resolution, resolution])
    labels_in.set_shape([None, label_size])
    images_in = tf.cast(images_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
    scores_out = None
    # Building blocks.
    def fromrgb(x, res): # res = 2..resolution_log2
        with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
            return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale)))
    def block(x, res): # res = 2..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            if res >= 3: # 8x8 and up
                with tf.variable_scope('Conv0'):
                    x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
                with tf.variable_scope('Conv1_down'):
                    x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)))
            else: # 4x4
                if mbstd_group_size > 1:
                    x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
                with tf.variable_scope('Conv'):
                    x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
                with tf.variable_scope('Dense0'):
                    x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale)))
                with tf.variable_scope('Dense1'):
                    x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale))
            return x
    # Fixed structure: simple and efficient, but does not support progressive growing.
    if structure == 'fixed':
        x = fromrgb(images_in, resolution_log2)
        for res in range(resolution_log2, 2, -1):
            x = block(x, res)
        scores_out = block(x, 2)
    # Linear structure: simple but inefficient.
    if structure == 'linear':
        img = images_in
        x = fromrgb(img, resolution_log2)
        for res in range(resolution_log2, 2, -1):
            lod = resolution_log2 - res
            x = block(x, res)
            img = downscale2d(img)
            y = fromrgb(img, res - 1)
            with tf.variable_scope('Grow_lod%d' % lod):
                # Cross-fade in the lower-resolution input path as lod_in rises.
                x = tflib.lerp_clip(x, y, lod_in - lod)
        scores_out = block(x, 2)
    # Recursive structure: complex but efficient.
    if structure == 'recursive':
        def cset(cur_lambda, new_cond, new_lambda):
            return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
        def grow(res, lod):
            x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
            if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
            x = block(x(), res); y = lambda: x
            if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
            return y()
        scores_out = grow(2, resolution_log2 - 2)
    # Label conditioning from "Which Training Methods for GANs do actually Converge?"
    if label_size:
        with tf.variable_scope('LabelSwitch'):
            scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True)
    assert scores_out.dtype == tf.as_dtype(dtype)
    scores_out = tf.identity(scores_out, name='scores_out')
    return scores_out
#----------------------------------------------------------------------------
# (dataset-extraction artifact removed: file statistics row)
# File: stylegan-encoder/training/networks_progan.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Network architectures used in the ProGAN paper."""
import numpy as np
import tensorflow as tf
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
def lerp(a, b, t):
    """Linear interpolation between a and b by factor t."""
    return a + (b - a) * t

def lerp_clip(a, b, t):
    """Linear interpolation with t clamped to the range [0, 1]."""
    return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)

def cset(cur_lambda, new_cond, new_lambda):
    """Chain lazily-evaluated branches: pick new_lambda when new_cond holds."""
    return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False):
    """He-initialized weight variable, optionally using the wscale trick.

    `shape` is [kernel, kernel, fmaps_in, fmaps_out] for convs or [in, out]
    for dense layers.
    """
    fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in)  # He initialization standard deviation.
    if use_wscale:
        # Equalized learning rate: unit-std init, scale applied at runtime.
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
    # Conventional init: scaling baked into the initializer.
    return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    """Fully-connected layer; inputs of rank > 2 are flattened first."""
    if len(x.shape) > 2:
        x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
    weight = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    return tf.matmul(x, tf.cast(weight, x.dtype))
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Stride-1 SAME-padded convolution in NCHW layout."""
    assert kernel >= 1 and kernel % 2 == 1  # odd kernel preserves spatial size
    weight = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    return tf.nn.conv2d(x, tf.cast(weight, x.dtype), strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x):
    """Add a learned per-channel bias (channel = axis 1 for 4D tensors)."""
    bias = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
    bias = tf.cast(bias, x.dtype)
    if len(x.shape) == 2:
        return x + bias
    # 4D activations: broadcast the bias over the spatial dimensions.
    return x + tf.reshape(bias, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16.
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU; equivalent to tf.nn.leaky_relu but FP16-safe."""
    with tf.name_scope('LeakyRelu'):
        coeff = tf.constant(alpha, dtype=x.dtype, name='alpha')
        return tf.maximum(x * coeff, x)
#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer.
def upscale2d(x, factor=2):
    """Nearest-neighbor upscaling by an integer factor (NCHW)."""
    assert isinstance(factor, int) and factor >= 1
    if factor == 1:
        return x
    with tf.variable_scope('Upscale2D'):
        s = x.shape
        # Insert singleton axes next to H and W, tile them, and collapse.
        y = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
        y = tf.tile(y, [1, 1, 1, factor, 1, factor])
        return tf.reshape(y, [-1, s[1], s[2] * factor, s[3] * factor])
#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused 2x nearest-neighbor upscale + conv via one transposed convolution."""
    assert kernel >= 1 and kernel % 2 == 1
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in]
    # Pad the kernel and sum four shifted copies: folds nearest-neighbor
    # upsampling into the transposed-conv kernel.
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
    w = tf.cast(w, x.dtype)
    os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
    return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Box filter downscaling layer.
def downscale2d(x, factor=2):
    """Box-filter (average-pool) downscaling by an integer factor (NCHW)."""
    assert isinstance(factor, int) and factor >= 1
    if factor == 1:
        return x
    with tf.variable_scope('Downscale2D'):
        pool = [1, 1, factor, factor]
        # NOTE: avg_pool on NCHW requires tf_config['graph_options.place_pruned_graph'] = True
        return tf.nn.avg_pool(x, ksize=pool, strides=pool, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused conv2d + downscale2d.
# Faster and uses less memory than performing the operations separately.
def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused conv + 2x box-filter downscale (stride-2 conv with smeared kernel)."""
    assert kernel >= 1 and kernel % 2 == 1
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    # Smear the kernel with a 2x2 box filter (x0.25); applied with stride 2
    # this equals conv2d followed by 2x average pooling.
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
    w = tf.cast(w, x.dtype)
    return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
    """Normalize each pixel's feature vector across channels (axis 1)."""
    with tf.variable_scope('PixelNorm'):
        mean_sq = tf.reduce_mean(tf.square(x), axis=1, keepdims=True)
        return x * tf.rsqrt(mean_sq + epsilon)
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
    """Append per-group minibatch stddev statistics as extra feature maps (ProGAN trick)."""
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]])   # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
        y = tf.cast(y, tf.float32)                              # [GMncHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keepdims=True)           # [GMncHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MncHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MncHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True)      # [Mn111]  Take average over fmaps and pixels.
        y = tf.reduce_mean(y, axis=[2])                         # [Mn11] Split channels into c channel groups
        y = tf.cast(y, x.dtype)                                 # [Mn11]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [NnHW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.
#----------------------------------------------------------------------------
# Networks used in the ProgressiveGAN paper.
def G_paper(
    latents_in,                         # First input: Latent vectors [minibatch, latent_size].
    labels_in,                          # Second input: Labels [minibatch, label_size].
    num_channels        = 1,            # Number of output color channels. Overridden based on dataset.
    resolution          = 32,           # Output resolution. Overridden based on dataset.
    label_size          = 0,            # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
    fmap_base           = 8192,         # Overall multiplier for the number of feature maps.
    fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
    fmap_max            = 512,          # Maximum number of feature maps in any layer.
    latent_size         = None,         # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max).
    normalize_latents   = True,         # Normalize latent vectors before feeding them to the network?
    use_wscale          = True,         # Enable equalized learning rate?
    use_pixelnorm       = True,         # Enable pixelwise feature vector normalization?
    pixelnorm_epsilon   = 1e-8,         # Constant epsilon for pixelwise feature vector normalization.
    use_leakyrelu       = True,         # True = leaky ReLU, False = ReLU.
    dtype               = 'float32',    # Data type to use for activations and outputs.
    fused_scale         = True,         # True = use fused upscale2d + conv2d, False = separate upscale2d layers.
    structure           = None,         # 'linear' = human-readable, 'recursive' = efficient, None = select automatically.
    is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
    **_kwargs):                         # Ignore unrecognized keyword args.
    """Progressive-growing generator from the ProgressiveGAN paper.

    Builds the synthesis graph from 4x4 up to `resolution`, blending adjacent
    resolutions according to the trainable 'lod' variable.
    """
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)  # fmaps per stage
    def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x
    if latent_size is None: latent_size = nf(0)
    if structure is None: structure = 'linear' if is_template_graph else 'recursive'
    act = leaky_relu if use_leakyrelu else tf.nn.relu

    latents_in.set_shape([None, latent_size])
    labels_in.set_shape([None, label_size])
    combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype)
    # 'lod' (level of detail) is set externally during training to fade in resolutions.
    lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
    images_out = None

    # Building blocks.
    def block(x, res): # res = 2..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            if res == 2: # 4x4
                if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon)
                with tf.variable_scope('Dense'):
                    x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation
                    x = tf.reshape(x, [-1, nf(res-1), 4, 4])
                    x = PN(act(apply_bias(x)))
                with tf.variable_scope('Conv'):
                    x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
            else: # 8x8 and up
                if fused_scale:
                    with tf.variable_scope('Conv0_up'):
                        x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
                else:
                    x = upscale2d(x)
                    with tf.variable_scope('Conv0'):
                        x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
                with tf.variable_scope('Conv1'):
                    x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
            return x
    def torgb(x, res): # res = 2..resolution_log2
        lod = resolution_log2 - res
        with tf.variable_scope('ToRGB_lod%d' % lod):
            return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))

    # Linear structure: simple but inefficient (builds every resolution and blends).
    if structure == 'linear':
        x = block(combo_in, 2)
        images_out = torgb(x, 2)
        for res in range(3, resolution_log2 + 1):
            lod = resolution_log2 - res
            x = block(x, res)
            img = torgb(x, res)
            images_out = upscale2d(images_out)
            with tf.variable_scope('Grow_lod%d' % lod):
                images_out = lerp_clip(img, images_out, lod_in - lod)  # fade between adjacent lods

    # Recursive structure: complex but efficient (tf.cond skips unused branches).
    if structure == 'recursive':
        def grow(x, res, lod):
            y = block(x, res)
            img = lambda: upscale2d(torgb(y, res), 2**lod)
            if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod))
            if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
            return img()
        images_out = grow(combo_in, 2, resolution_log2 - 2)

    assert images_out.dtype == tf.as_dtype(dtype)
    images_out = tf.identity(images_out, name='images_out')
    return images_out
def D_paper(
    images_in,                          # First input: Images [minibatch, channel, height, width].
    labels_in,                          # Second input: Labels [minibatch, label_size].
    num_channels        = 1,            # Number of input color channels. Overridden based on dataset.
    resolution          = 32,           # Input resolution. Overridden based on dataset.
    label_size          = 0,            # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
    fmap_base           = 8192,         # Overall multiplier for the number of feature maps.
    fmap_decay          = 1.0,          # log2 feature map reduction when doubling the resolution.
    fmap_max            = 512,          # Maximum number of feature maps in any layer.
    use_wscale          = True,         # Enable equalized learning rate?
    mbstd_group_size    = 4,            # Group size for the minibatch standard deviation layer, 0 = disable.
    dtype               = 'float32',    # Data type to use for activations and outputs.
    fused_scale         = True,         # True = use fused conv2d + downscale2d, False = separate downscale2d layers.
    structure           = None,         # 'linear' = human-readable, 'recursive' = efficient, None = select automatically
    is_template_graph   = False,        # True = template graph constructed by the Network class, False = actual evaluation.
    **_kwargs):                         # Ignore unrecognized keyword args.
    """Progressive-growing discriminator from the ProgressiveGAN paper.

    Mirrors G_paper: processes images from `resolution` down to 4x4, blending
    adjacent resolutions according to the trainable 'lod' variable, and
    outputs one unbounded score per image.
    """
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)  # fmaps per stage
    if structure is None: structure = 'linear' if is_template_graph else 'recursive'
    act = leaky_relu

    images_in.set_shape([None, num_channels, resolution, resolution])
    labels_in.set_shape([None, label_size])
    images_in = tf.cast(images_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
    scores_out = None

    # Building blocks.
    def fromrgb(x, res): # res = 2..resolution_log2
        with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
            return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale)))
    def block(x, res): # res = 2..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            if res >= 3: # 8x8 and up
                with tf.variable_scope('Conv0'):
                    x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
                if fused_scale:
                    with tf.variable_scope('Conv1_down'):
                        x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
                else:
                    with tf.variable_scope('Conv1'):
                        x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
                    x = downscale2d(x)
            else: # 4x4
                if mbstd_group_size > 1:
                    x = minibatch_stddev_layer(x, mbstd_group_size)
                with tf.variable_scope('Conv'):
                    x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
                with tf.variable_scope('Dense0'):
                    x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale)))
                with tf.variable_scope('Dense1'):
                    x = apply_bias(dense(x, fmaps=1, gain=1, use_wscale=use_wscale))  # final score, no activation
            return x

    # Linear structure: simple but inefficient (builds every resolution and blends).
    if structure == 'linear':
        img = images_in
        x = fromrgb(img, resolution_log2)
        for res in range(resolution_log2, 2, -1):
            lod = resolution_log2 - res
            x = block(x, res)
            img = downscale2d(img)
            y = fromrgb(img, res - 1)
            with tf.variable_scope('Grow_lod%d' % lod):
                x = lerp_clip(x, y, lod_in - lod)  # fade between adjacent lods
        scores_out = block(x, 2)

    # Recursive structure: complex but efficient (tf.cond skips unused branches).
    if structure == 'recursive':
        def grow(res, lod):
            x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
            if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
            x = block(x(), res); y = lambda: x
            if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
            return y()
        scores_out = grow(2, resolution_log2 - 2)

    assert scores_out.dtype == tf.as_dtype(dtype)
    scores_out = tf.identity(scores_out, name='scores_out')
    return scores_out
#----------------------------------------------------------------------------
| 17,575 | 53.414861 | 191 | py |
stylegan-encoder | stylegan-encoder-master/training/loss.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Loss functions."""
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
    """Cast every argument to tf.float32; a single tuple argument is unpacked,
    and a single result is returned unwrapped."""
    if len(values) == 1 and isinstance(values[0], tuple):
        values = values[0]
    casted = tuple(tf.cast(v, tf.float32) for v in values)
    return casted if len(casted) >= 2 else casted[0]
#----------------------------------------------------------------------------
# WGAN & WGAN-GP loss functions.
def G_wgan(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
    """WGAN generator loss: drive the critic's score on fakes upward."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_labels = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, fake_labels, is_training=True)
    fake_scores = fp32(D.get_output_for(fakes, fake_labels, is_training=True))
    return -fake_scores
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_epsilon     = 0.001):     # Weight for the epsilon term, \epsilon_{drift}.
    """WGAN discriminator (critic) loss with epsilon drift penalty, no gradient penalty."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out  # Wasserstein estimate: reals should score higher.
    with tf.name_scope('EpsilonPenalty'):
        # Small quadratic pull toward zero keeps real scores from drifting unboundedly.
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
        loss += epsilon_penalty * wgan_epsilon
    return loss
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.
    """WGAN-GP discriminator loss: Wasserstein estimate + gradient penalty + drift term."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    with tf.name_scope('GradientPenalty'):
        # Evaluate D on random interpolates between reals and fakes and push the
        # gradient norm there toward wgan_target.
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        # Loss scaling protects low-precision gradients; it is undone before the norm.
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
        loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    with tf.name_scope('EpsilonPenalty'):
        # Small quadratic pull toward zero keeps real scores from drifting unboundedly.
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
        loss += epsilon_penalty * wgan_epsilon
    return loss
#----------------------------------------------------------------------------
# Hinge loss functions. (Use G_wgan with these)
def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
    """Hinge discriminator loss: margin penalties on real and fake scores."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fakes, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    return tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out)
def D_hinge_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.
    """Hinge discriminator loss with a WGAN-GP-style gradient penalty on interpolates."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out)
    with tf.name_scope('GradientPenalty'):
        # Evaluate D on random real/fake interpolates and push the gradient
        # norm there toward wgan_target (same construction as D_wgan_gp).
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        # Loss scaling protects low-precision gradients; it is undone before the norm.
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
        loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    return loss
#----------------------------------------------------------------------------
# Loss functions advocated by the paper
# "Which Training Methods for GANs do actually Converge?"
def G_logistic_saturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
    """Saturating logistic generator loss: log(1 - logistic(D(fake)))."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    lbls = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, lbls, is_training=True)
    scores = fp32(D.get_output_for(fakes, lbls, is_training=True))
    return -tf.nn.softplus(scores)  # log(1 - logistic(scores))
def G_logistic_nonsaturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
    """Non-saturating logistic generator loss: -log(logistic(D(fake)))."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    lbls = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, lbls, is_training=True)
    scores = fp32(D.get_output_for(fakes, lbls, is_training=True))
    return tf.nn.softplus(-scores)  # -log(logistic(scores))
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
    """Plain logistic discriminator loss (standard GAN cross-entropy)."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fakes, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    # -log(1 - logistic(fake)) - log(logistic(real))
    return tf.nn.softplus(fake_scores_out) + tf.nn.softplus(-real_scores_out)
def D_logistic_simplegp(G, D, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0): # pylint: disable=unused-argument
    """Logistic discriminator loss with optional R1 (reals) and R2 (fakes) simple gradient penalties."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type
    if r1_gamma != 0.0:
        with tf.name_scope('R1Penalty'):
            # Penalize the squared gradient norm of D at the real samples.
            # Loss scaling protects low-precision gradients; undone before squaring.
            real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out))
            real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0]))
            r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3])
            r1_penalty = autosummary('Loss/r1_penalty', r1_penalty)
            loss += r1_penalty * (r1_gamma * 0.5)
    if r2_gamma != 0.0:
        with tf.name_scope('R2Penalty'):
            # Same penalty evaluated at the generated samples instead.
            fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out))
            fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0]))
            r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3])
            r2_penalty = autosummary('Loss/r2_penalty', r2_penalty)
            loss += r2_penalty * (r2_gamma * 0.5)
    return loss
#----------------------------------------------------------------------------
| 10,416 | 57.522472 | 154 | py |
stylegan-encoder | stylegan-encoder-master/training/misc.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous utility functions."""
import os
import glob
import pickle
import re
import numpy as np
from collections import defaultdict
import PIL.Image
import dnnlib
import config
from training import dataset
#----------------------------------------------------------------------------
# Convenience wrappers for pickle that are able to load data produced by
# older versions of the code, and from external URLs.
def open_file_or_url(file_or_url):
    """Open a local file for binary reading, or a URL via dnnlib's cached downloader."""
    if dnnlib.util.is_url(file_or_url):
        return dnnlib.util.open_url(file_or_url, cache_dir=config.cache_dir)
    return open(file_or_url, 'rb')
def load_pkl(file_or_url):
    """Unpickle an object from a local file or URL.

    encoding='latin1' allows loading pickles produced by older Python 2 code.
    NOTE(review): unpickling is code execution — only load trusted sources.
    """
    with open_file_or_url(file_or_url) as file:
        return pickle.load(file, encoding='latin1')
def save_pkl(obj, filename):
    """Pickle *obj* to *filename* using the highest available protocol."""
    with open(filename, 'wb') as fh:
        pickle.dump(obj, fh, protocol=pickle.HIGHEST_PROTOCOL)
#----------------------------------------------------------------------------
# Image utils.
def adjust_dynamic_range(data, drange_in, drange_out):
    """Linearly remap *data* from the range drange_in to drange_out.

    Returns the input unchanged when the two ranges are equal.
    """
    if drange_in == drange_out:
        return data
    span_out = np.float32(drange_out[1]) - np.float32(drange_out[0])
    span_in = np.float32(drange_in[1]) - np.float32(drange_in[0])
    scale = span_out / span_in
    bias = np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale
    return data * scale + bias
def create_image_grid(images, grid_size=None):
    """Tile a batch of images (N...HW) into a single grid image.

    grid_size = (grid_w, grid_h); when None, an approximately square layout
    is chosen automatically. Extra grid cells stay zero-filled.
    """
    assert images.ndim in (3, 4)
    num = images.shape[0]
    img_w, img_h = images.shape[-1], images.shape[-2]

    if grid_size is None:
        grid_w = max(int(np.ceil(np.sqrt(num))), 1)
        grid_h = max((num - 1) // grid_w + 1, 1)
    else:
        grid_w, grid_h = tuple(grid_size)

    grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
    for idx in range(num):
        x0 = (idx % grid_w) * img_w
        y0 = (idx // grid_w) * img_h
        grid[..., y0 : y0 + img_h, x0 : x0 + img_w] = images[idx]
    return grid
def convert_to_pil_image(image, drange=[0,1]):
    """Convert an HW or CHW numpy array in *drange* to an 8-bit PIL image."""
    assert image.ndim == 2 or image.ndim == 3
    if image.ndim == 3:
        if image.shape[0] == 1:
            image = image[0] # grayscale CHW => HW
        else:
            image = image.transpose(1, 2, 0) # CHW -> HWC
    image = adjust_dynamic_range(image, drange, [0,255])
    image = np.rint(image).clip(0, 255).astype(np.uint8)
    fmt = 'RGB' if image.ndim == 3 else 'L'  # 'L' = 8-bit grayscale
    return PIL.Image.fromarray(image, fmt)
def save_image(image, filename, drange=[0,1], quality=95):
    """Save a numpy image to disk; JPEG quality applies only to .jpg targets."""
    pil_img = convert_to_pil_image(image, drange)
    if '.jpg' in filename:
        pil_img.save(filename,"JPEG", quality=quality, optimize=True)
    else:
        pil_img.save(filename)
def save_image_grid(images, filename, drange=[0,1], grid_size=None):
    """Tile a batch of images into one grid and save it to *filename*."""
    grid = create_image_grid(images, grid_size)
    convert_to_pil_image(grid, drange).save(filename)
#----------------------------------------------------------------------------
# Locating results.
def locate_run_dir(run_id_or_run_dir):
    """Resolve a run id (or directory path) to its result subdirectory.

    Accepts an existing directory path, a dnnlib-convertible path, or a run id
    matching a zero-padded '<id>-...' subdirectory under config.result_dir.
    Raises IOError when no unique match exists.
    """
    if isinstance(run_id_or_run_dir, str):
        if os.path.isdir(run_id_or_run_dir):
            return run_id_or_run_dir
        converted = dnnlib.submission.submit.convert_path(run_id_or_run_dir)
        if os.path.isdir(converted):
            return converted
    # e.g. run id 3 matches directory '00003-...'.
    run_dir_pattern = re.compile('^0*%s-' % str(run_id_or_run_dir))
    for search_dir in ['']:
        full_search_dir = config.result_dir if search_dir == '' else os.path.normpath(os.path.join(config.result_dir, search_dir))
        run_dir = os.path.join(full_search_dir, str(run_id_or_run_dir))
        if os.path.isdir(run_dir):
            return run_dir
        run_dirs = sorted(glob.glob(os.path.join(full_search_dir, '*')))
        run_dirs = [run_dir for run_dir in run_dirs if run_dir_pattern.match(os.path.basename(run_dir))]
        run_dirs = [run_dir for run_dir in run_dirs if os.path.isdir(run_dir)]
        if len(run_dirs) == 1:  # only return an unambiguous match
            return run_dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id_or_run_dir)
def list_network_pkls(run_id_or_run_dir, include_final=True):
    """List a run's network-*.pkl snapshots, moving 'network-final.pkl' to the end."""
    run_dir = locate_run_dir(run_id_or_run_dir)
    pkls = sorted(glob.glob(os.path.join(run_dir, 'network-*.pkl')))
    if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl':
        # 'final' sorts before 'snapshot-*'; move it last (or drop it entirely).
        if include_final:
            pkls.append(pkls[0])
        del pkls[0]
    return pkls
def locate_latest_pkl():
    """Find the most recent network snapshot across all runs in config.result_dir.

    Returns:
        (network_pkl_path, kimg) where kimg is the training progress parsed
        from the snapshot filename.

    Raises:
        IndexError: if no snapshots exist.
        AttributeError: if the newest pkl is not a 'network-snapshot-*.pkl'.
    """
    allpickles = sorted(glob.glob(os.path.join(config.result_dir, '0*', 'network-*.pkl')))
    latest_pickle = allpickles[-1]
    resume_run_id = os.path.basename(os.path.dirname(latest_pickle))
    # Raw string and escaped dot: the original 'network-snapshot-(\d+).pkl'
    # relied on an invalid escape sequence and matched any char before 'pkl'.
    RE_KIMG = re.compile(r'network-snapshot-(\d+)\.pkl')
    kimg = int(RE_KIMG.match(os.path.basename(latest_pickle)).group(1))
    return (locate_network_pkl(resume_run_id), float(kimg))
def locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None):
    """Resolve flexible arguments to a concrete network pkl path.

    Either argument may already be a pkl file path (checked first); otherwise
    the first argument is treated as a run id/dir and the second selects a
    snapshot by its trailing number (None = latest snapshot).
    """
    for candidate in [snapshot_or_network_pkl, run_id_or_run_dir_or_network_pkl]:
        if isinstance(candidate, str):
            if os.path.isfile(candidate):
                return candidate
            converted = dnnlib.submission.submit.convert_path(candidate)
            if os.path.isfile(converted):
                return converted
    pkls = list_network_pkls(run_id_or_run_dir_or_network_pkl)
    if len(pkls) >= 1 and snapshot_or_network_pkl is None:
        return pkls[-1]  # latest ('final' is sorted last by list_network_pkls)
    for pkl in pkls:
        try:
            # Match the snapshot by the trailing number in 'network-snapshot-<N>.pkl'.
            name = os.path.splitext(os.path.basename(pkl))[0]
            number = int(name.split('-')[-1])
            if number == snapshot_or_network_pkl:
                return pkl
        except ValueError: pass
        except IndexError: pass
    raise IOError('Cannot locate network pkl for snapshot', snapshot_or_network_pkl)
def get_id_string_for_network_pkl(network_pkl):
    """Derive a short id string ('<run_dir>-<snapshot>') from a network pkl path."""
    parts = network_pkl.replace('.pkl', '').replace('\\', '/').split('/')
    return '-'.join(parts[max(len(parts) - 2, 0):])
#----------------------------------------------------------------------------
# Loading data from previous training runs.
def load_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None):
    """Locate and unpickle a network snapshot from a previous training run."""
    return load_pkl(locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl))
def parse_config_for_previous_run(run_id):
    """Recover the 'dataset' and 'train' option dicts recorded in a run's config.txt."""
    run_dir = locate_run_dir(run_id)

    # Parse config.txt.
    cfg = defaultdict(dict)
    with open(os.path.join(run_dir, 'config.txt'), 'rt') as f:
        for line in f:
            # Rewrite "'key': {...}," into "key = {...}" so it can be exec'd.
            line = re.sub(r"^{?\s*'(\w+)':\s*{(.*)(},|}})$", r"\1 = {\2}", line.strip())
            if line.startswith('dataset =') or line.startswith('train ='):
                # NOTE(review): exec on file contents — only safe for trusted,
                # locally produced run directories.
                exec(line, cfg, cfg) # pylint: disable=exec-used

    # Handle legacy options.
    if 'file_pattern' in cfg['dataset']:
        cfg['dataset']['tfrecord_dir'] = cfg['dataset'].pop('file_pattern').replace('-r??.tfrecords', '')
    if 'mirror_augment' in cfg['dataset']:
        cfg['train']['mirror_augment'] = cfg['dataset'].pop('mirror_augment')
    if 'max_labels' in cfg['dataset']:
        v = cfg['dataset'].pop('max_labels')
        if v is None: v = 0
        if v == 'all': v = 'full'
        cfg['dataset']['max_label_size'] = v
    if 'max_images' in cfg['dataset']:
        cfg['dataset'].pop('max_images')
    return cfg
def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment
    """Re-open the dataset a previous run trained on, with its recorded options.

    Keyword args override the recorded dataset options.
    """
    cfg = parse_config_for_previous_run(run_id)
    cfg['dataset'].update(kwargs)
    dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **cfg['dataset'])
    mirror_augment = cfg['train'].get('mirror_augment', False)
    return dataset_obj, mirror_augment
def apply_mirror_augment(minibatch):
    """Horizontally flip a random ~half of an NCHW minibatch (returns a copy)."""
    flip = np.random.rand(minibatch.shape[0]) < 0.5
    out = np.array(minibatch)  # copy so the caller's batch is untouched
    out[flip] = out[flip, :, :, ::-1]
    return out
#----------------------------------------------------------------------------
# Size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(G, training_set,
    size    = '1080p',      # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
    layout  = 'random'):    # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
    """Pick the grid shape and content for periodic training snapshot images.

    Returns ((gw, gh), reals, labels, latents) where reals/labels come from
    the training set and latents are fresh Gaussian samples.
    """
    # Select size.
    gw = 1; gh = 1
    if size == '1080p':
        gw = np.clip(1920 // G.output_shape[3], 3, 32)
        gh = np.clip(1080 // G.output_shape[2], 2, 32)
    if size == '4k':
        gw = np.clip(3840 // G.output_shape[3], 7, 32)
        gh = np.clip(2160 // G.output_shape[2], 4, 32)

    # Initialize data arrays.
    reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
    labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
    latents = np.random.randn(gw * gh, *G.input_shape[1:])

    # Random layout.
    if layout == 'random':
        reals[:], labels[:] = training_set.get_minibatch_np(gw * gh)

    # Class-conditional layouts: each bw x bh block shows one class.
    class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4])
    if layout in class_layouts:
        bw, bh = class_layouts[layout]
        nw = (gw - 1) // bw + 1
        nh = (gh - 1) // bh + 1
        blocks = [[] for _i in range(nw * nh)]
        # Draw examples until every class block has enough images (bounded loop).
        for _iter in range(1000000):
            real, label = training_set.get_minibatch_np(1)
            idx = np.argmax(label[0])
            while idx < len(blocks) and len(blocks[idx]) >= bw * bh:
                idx += training_set.label_size  # overflow into later duplicate blocks
            if idx < len(blocks):
                blocks[idx].append((real, label))
                if all(len(block) >= bw * bh for block in blocks):
                    break
        # Place collected examples into the grid.
        for i, block in enumerate(blocks):
            for j, (real, label) in enumerate(block):
                x = (i % nw) * bw + j % bw
                y = (i // nw) * bh + j // bw
                if x < gw and y < gh:
                    reals[x + y * gw] = real[0]
                    labels[x + y * gw] = label[0]

    return (gw, gh), reals, labels, latents
#----------------------------------------------------------------------------
| 10,406 | 39.972441 | 136 | py |
stylegan-encoder | stylegan-encoder-master/training/dataset.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Multi-resolution input data pipeline."""
import os
import glob
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
#----------------------------------------------------------------------------
# Parse individual image from a tfrecords file.
def parse_tfrecord_tf(record):
    """Graph-mode parse of one serialized tf.train.Example into a uint8 image tensor."""
    features = tf.parse_single_example(record, features={
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string)})
    data = tf.decode_raw(features['data'], tf.uint8)
    return tf.reshape(data, features['shape'])
def parse_tfrecord_np(record):
    """Eagerly parse one serialized tf.train.Example into a numpy uint8 array.

    Counterpart of parse_tfrecord_tf for use outside the TF graph (e.g. shape
    inspection of tfrecords files).
    """
    ex = tf.train.Example()
    ex.ParseFromString(record)
    shape = ex.features.feature['shape'].int64_list.value # temporary pylint workaround # pylint: disable=no-member
    data = ex.features.feature['data'].bytes_list.value[0] # temporary pylint workaround # pylint: disable=no-member
    # np.fromstring is deprecated for binary input; frombuffer is the
    # documented zero-copy replacement for bytes data.
    return np.frombuffer(data, np.uint8).reshape(shape)
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
def __init__(self,
tfrecord_dir, # Directory containing a collection of tfrecords files.
resolution = None, # Dataset resolution, None = autodetect.
label_file = None, # Relative path of the labels file, None = autodetect.
max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.
repeat = True, # Repeat dataset indefinitely.
shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.
prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.
buffer_mb = 256, # Read buffer size (megabytes).
num_threads = 2): # Number of concurrent threads.
self.tfrecord_dir = tfrecord_dir
self.resolution = None
self.resolution_log2 = None
self.shape = [] # [channel, height, width]
self.dtype = 'uint8'
self.dynamic_range = [0, 255]
self.label_file = label_file
self.label_size = None # [component]
self.label_dtype = None
self._np_labels = None
self._tf_minibatch_in = None
self._tf_labels_var = None
self._tf_labels_dataset = None
self._tf_datasets = dict()
self._tf_iterator = None
self._tf_init_ops = dict()
self._tf_minibatch_np = None
self._cur_minibatch = -1
self._cur_lod = -1
# List tfrecords files and inspect their shapes.
assert os.path.isdir(self.tfrecord_dir)
tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
assert len(tfr_files) >= 1
tfr_shapes = []
for tfr_file in tfr_files:
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
tfr_shapes.append(parse_tfrecord_np(record).shape)
break
# Autodetect label filename.
if self.label_file is None:
guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
if len(guess):
self.label_file = guess[0]
elif not os.path.isfile(self.label_file):
guess = os.path.join(self.tfrecord_dir, self.label_file)
if os.path.isfile(guess):
self.label_file = guess
# Determine shape and resolution.
max_shape = max(tfr_shapes, key=np.prod)
self.resolution = resolution if resolution is not None else max_shape[1]
self.resolution_log2 = int(np.log2(self.resolution))
self.shape = [max_shape[0], self.resolution, self.resolution]
tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
assert all(shape[1] == shape[2] for shape in tfr_shapes)
assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
# Load labels.
assert max_label_size == 'full' or max_label_size >= 0
self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
if self.label_file is not None and max_label_size != 0:
self._np_labels = np.load(self.label_file)
assert self._np_labels.ndim == 2
if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
self._np_labels = self._np_labels[:, :max_label_size]
self.label_size = self._np_labels.shape[1]
self.label_dtype = self._np_labels.dtype.name
# Build TF expressions.
with tf.name_scope('Dataset'), tf.device('/cpu:0'):
self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var')
self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
if tfr_lod < 0:
continue
dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
if shuffle_mb > 0:
dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
if repeat:
dset = dset.repeat()
if prefetch_mb > 0:
dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
dset = dset.batch(self._tf_minibatch_in)
self._tf_datasets[tfr_lod] = dset
self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
# Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
def configure(self, minibatch_size, lod=0):
    """Select the pipeline for the given minibatch size and LOD.

    The dataset iterator is re-initialized only when either value actually
    changes, so repeated calls with identical arguments are cheap.
    """
    lod = int(np.floor(lod))
    assert minibatch_size >= 1 and lod in self._tf_datasets
    # Nothing to do when the requested configuration is already active.
    unchanged = (self._cur_minibatch == minibatch_size and self._cur_lod == lod)
    if unchanged:
        return
    self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
    self._cur_minibatch = minibatch_size
    self._cur_lod = lod
# Get next minibatch as TensorFlow expressions.
def get_minibatch_tf(self): # => images, labels
    """Return TF ops that yield the next (images, labels) minibatch from the active iterator."""
    return self._tf_iterator.get_next()
# Get next minibatch as NumPy arrays.
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
    """Configure the pipeline, then fetch one (images, labels) minibatch as NumPy arrays."""
    self.configure(minibatch_size, lod)
    # Build the fetch expression lazily on first use and reuse it afterwards.
    if self._tf_minibatch_np is None:
        self._tf_minibatch_np = self.get_minibatch_tf()
    fetch_expr = self._tf_minibatch_np
    return tflib.run(fetch_expr)
# Get random labels as TensorFlow expression.
def get_random_labels_tf(self, minibatch_size): # => labels
    """Return a TF expression that samples `minibatch_size` label rows uniformly at random."""
    # Unlabeled dataset: emit an empty label tensor of the right shape.
    if self.label_size <= 0:
        return tf.zeros([minibatch_size, 0], self.label_dtype)
    with tf.device('/cpu:0'):
        row_idx = tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32)
        return tf.gather(self._tf_labels_var, row_idx)
# Get random labels as NumPy array.
def get_random_labels_np(self, minibatch_size): # => labels
    """Sample `minibatch_size` label rows uniformly at random as a NumPy array."""
    # Unlabeled dataset: return an empty label array of the right shape.
    if self.label_size <= 0:
        return np.zeros([minibatch_size, 0], self.label_dtype)
    row_idx = np.random.randint(self._np_labels.shape[0], size=[minibatch_size])
    return self._np_labels[row_idx]
#----------------------------------------------------------------------------
# Base class for datasets that are generated on the fly.
class SyntheticDataset:
    """Base class for datasets whose images are generated on the fly.

    Mirrors the interface of the TFRecord-backed dataset class above
    (configure / get_minibatch_* / get_random_labels_*), but produces data
    from `_generate_images` / `_generate_labels`, which subclasses override.
    """

    def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
        # NOTE(review): mutable defaults (dynamic_range) are shared across calls —
        # treated as read-only here, following the file's existing convention.
        self.resolution = resolution
        self.resolution_log2 = int(np.log2(resolution))
        self.shape = [num_channels, resolution, resolution]   # CHW layout.
        self.dtype = dtype
        self.dynamic_range = dynamic_range
        self.label_size = label_size
        self.label_dtype = label_dtype
        self._tf_minibatch_var = None
        self._tf_lod_var = None
        self._tf_minibatch_np = None       # cached (images, labels) fetch expression
        self._tf_labels_np = None          # cached random-labels fetch expression
        assert self.resolution == 2 ** self.resolution_log2   # must be a power of two
        with tf.name_scope('Dataset'):
            # TF variables so minibatch size / LOD can be changed without rebuilding the graph.
            self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
            self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')

    def configure(self, minibatch_size, lod=0):
        """Set the minibatch size and level-of-detail used by get_minibatch_tf()."""
        lod = int(np.floor(lod))
        assert minibatch_size >= 1 and 0 <= lod <= self.resolution_log2
        tflib.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod})

    def get_minibatch_tf(self): # => images, labels
        """Build TF expressions producing one synthetic (images, labels) minibatch."""
        with tf.name_scope('SyntheticDataset'):
            # Each LOD step halves the spatial resolution.
            shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
            shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
            images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
            labels = self._generate_labels(self._tf_minibatch_var)
            return images, labels

    def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
        """Configure the dataset and evaluate one minibatch as NumPy arrays."""
        self.configure(minibatch_size, lod)
        if self._tf_minibatch_np is None:
            self._tf_minibatch_np = self.get_minibatch_tf()
        return tflib.run(self._tf_minibatch_np)

    def get_random_labels_tf(self, minibatch_size): # => labels
        """Return a TF expression for a batch of generated labels."""
        with tf.name_scope('SyntheticDataset'):
            return self._generate_labels(minibatch_size)

    def get_random_labels_np(self, minibatch_size): # => labels
        """Evaluate a batch of generated labels as a NumPy array.

        NOTE(review): the fetch expression is cached on first call with that
        call's minibatch_size baked in; later calls with a different size
        reuse the old expression — confirm callers always use one size.
        """
        self.configure(minibatch_size)
        if self._tf_labels_np is None:
            self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
        return tflib.run(self._tf_labels_np)

    def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses # pylint: disable=unused-argument
        # Default implementation: all-zero images of the requested shape.
        return tf.zeros([minibatch] + shape, self.dtype)

    def _generate_labels(self, minibatch): # to be overridden by subclasses
        # Default implementation: all-zero labels.
        return tf.zeros([minibatch, self.label_size], self.label_dtype)
#----------------------------------------------------------------------------
# Helper func for constructing a dataset object using the given options.
def load_dataset(class_name='training.dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs):
    """Construct a dataset object from its dotted class name.

    `data_dir`, when given, is prepended to a relative `tfrecord_dir` kwarg.
    All remaining keyword arguments are forwarded to the dataset constructor.
    """
    ctor_kwargs = dict(kwargs)
    if data_dir is not None and 'tfrecord_dir' in ctor_kwargs:
        ctor_kwargs['tfrecord_dir'] = os.path.join(data_dir, ctor_kwargs['tfrecord_dir'])
    if verbose:
        print('Streaming data using %s...' % class_name)
    dataset = dnnlib.util.get_obj_by_name(class_name)(**ctor_kwargs)
    if verbose:
        print('Dataset shape =', np.int32(dataset.shape).tolist())
        print('Dynamic range =', dataset.dynamic_range)
        print('Label size =', dataset.label_size)
    return dataset
#----------------------------------------------------------------------------
| 12,220 | 49.5 | 135 | py |
stylegan-encoder | stylegan-encoder-master/training/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
# empty
| 350 | 38 | 76 | py |
stylegan-encoder | stylegan-encoder-master/training/training_loop.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main training script."""
import os
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
import config
import train
from training import dataset
from training import misc
from metrics import metric_base
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    """Just-in-time preprocessing of a minibatch of real images (TF graph ops).

    x: image tensor in the dataset's dynamic range, NCHW layout.
    lod: scalar level-of-detail; the fractional part drives the crossfade.
    Returns the processed image tensor in `drange_net` range.
    """
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                # Per-image coin flip: mirror roughly half the batch along axis 3 (width).
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            # 2x2 box-filter downscale, then nearest-neighbor upscale back...
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            # ...and blend by the fractional part of lod.
            x = tflib.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            # Nearest-neighbor upscale by `factor` via reshape + tile.
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
    return x
#----------------------------------------------------------------------------
# Evaluate time-varying training parameters.
def training_schedule(
    cur_nimg,
    training_set,
    num_gpus,
    lod_initial_resolution = 4, # Image resolution used at the beginning.
    lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution.
    lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers.
    minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs.
    minibatch_dict = {}, # Resolution-specific overrides.
    max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU.
    G_lrate_base = 0.001, # Learning rate for the generator.
    G_lrate_dict = {}, # Resolution-specific overrides.
    D_lrate_base = 0.001, # Learning rate for the discriminator.
    D_lrate_dict = {}, # Resolution-specific overrides.
    lrate_rampup_kimg = 0, # Duration of learning rate ramp-up.
    tick_kimg_base = 160, # Default interval of progress snapshots.
    tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:30, 1024:20}, # Resolution-specific overrides.
    restore_partial_fn = None # Filename of to be restored network
    ):
    """Evaluate the time-varying training parameters for the current point in training.

    Returns an EasyDict with: kimg, lod, resolution, minibatch, G_lrate,
    D_lrate, tick_kimg.

    NOTE(review): `restore_partial_fn` is never referenced in this function's
    body, while training_loop() below references a name of the same spelling —
    the parameter looks misplaced; confirm its intended home.
    """
    # Initialize result dict.
    s = dnnlib.EasyDict()
    s.kimg = cur_nimg / 1000.0
    # Training phase: each phase = lod_training_kimg at fixed LOD + lod_transition_kimg of fade.
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = s.kimg - phase_idx * phase_dur
    # Level-of-detail and resolution.
    s.lod = training_set.resolution_log2
    s.lod -= np.floor(np.log2(lod_initial_resolution))
    s.lod -= phase_idx
    if lod_transition_kimg > 0:
        # Fractional LOD during the fade-in portion of the phase.
        s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    s.lod = max(s.lod, 0.0)
    s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod)))
    # Minibatch size: rounded down to a multiple of num_gpus, then capped per GPU.
    s.minibatch = minibatch_dict.get(s.resolution, minibatch_base)
    s.minibatch -= s.minibatch % num_gpus
    if s.resolution in max_minibatch_per_gpu:
        s.minibatch = min(s.minibatch, max_minibatch_per_gpu[s.resolution] * num_gpus)
    # Learning rate, optionally ramped up from zero over lrate_rampup_kimg.
    s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base)
    s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base)
    if lrate_rampup_kimg > 0:
        rampup = min(s.kimg / lrate_rampup_kimg, 1.0)
        s.G_lrate *= rampup
        s.D_lrate *= rampup
    # Other parameters.
    s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base)
    return s
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
    submit_config,
    G_args = {}, # Options for generator network.
    D_args = {}, # Options for discriminator network.
    G_opt_args = {}, # Options for generator optimizer.
    D_opt_args = {}, # Options for discriminator optimizer.
    G_loss_args = {}, # Options for generator loss.
    D_loss_args = {}, # Options for discriminator loss.
    dataset_args = {}, # Options for dataset.load_dataset().
    sched_args = {}, # Options for train.TrainingSchedule.
    grid_args = {}, # Options for train.setup_snapshot_image_grid().
    metric_arg_list = [], # Options for MetricGroup.
    tf_config = {}, # Options for tflib.init_tf().
    G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights.
    D_repeats = 1, # How many times the discriminator is trained per G iteration.
    minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters.
    reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
    total_kimg = 15000, # Total length of the training, measured in thousands of real images.
    mirror_augment = False, # Enable mirror augment?
    drange_net = [-1,1], # Dynamic range used when feeding image data to the networks.
    image_snapshot_ticks = 1, # How often to export image snapshots?
    network_snapshot_ticks = 10, # How often to export network snapshots?
    save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?
    save_weight_histograms = False, # Include weight histograms in the tfevents file?
    resume_run_id = 'latest', # Run ID or network pkl to resume training from, None = start from scratch.
    resume_snapshot = None, # Snapshot index to resume training from, None = autodetect.
    resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule.
    resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting.
    """Main progressive-growing GAN training loop.

    Builds G/D/Gs networks (or resumes them from a pickle), constructs the
    multi-GPU training graph, then alternates D/G updates while growing the
    resolution according to training_schedule(), periodically saving image
    grids, network snapshots, and metric results.
    """
    # Initialize dnnlib and TensorFlow.
    ctx = dnnlib.RunContext(submit_config, train)
    tflib.init_tf(tf_config)
    # Load training set.
    training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **dataset_args)
    # Construct networks.
    with tf.device('/gpu:0'):
        # Load pre-trained
        if resume_run_id is not None:
            if resume_run_id == 'latest':
                network_pkl, resume_kimg = misc.locate_latest_pkl()
                print('Loading networks from "%s"...' % network_pkl)
                G, D, Gs = misc.load_pkl(network_pkl)
            elif resume_run_id == 'restore_partial':
                print('Restore partially...')
                # Initialize networks
                G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
                D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
                Gs = G.clone('Gs')
                # Load pre-trained networks
                # NOTE(review): `restore_partial_fn` is not a parameter of this
                # function (it appears in training_schedule's signature above),
                # and `pickle` is not imported in this module's header — this
                # branch raises NameError as written; confirm and fix upstream.
                assert restore_partial_fn != None
                G_partial, D_partial, Gs_partial = pickle.load(open(restore_partial_fn, 'rb'))
                # Restore (subset of) pre-trained weights
                # (only parameters that match both name and shape)
                G.copy_compatible_trainables_from(G_partial)
                D.copy_compatible_trainables_from(D_partial)
                Gs.copy_compatible_trainables_from(Gs_partial)
            else:
                network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot)
                print('Loading networks from "%s"...' % network_pkl)
                G, D, Gs = misc.load_pkl(network_pkl)
        # Start from scratch
        else:
            print('Constructing networks...')
            G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
            D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
            Gs = G.clone('Gs')
    G.print_layers(); D.print_layers()
    print('Building TensorFlow graph...')
    with tf.name_scope('Inputs'), tf.device('/cpu:0'):
        lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
        lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
        minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[])
        minibatch_split = minibatch_in // submit_config.num_gpus
        # Per-minibatch EMA decay derived from the half-life G_smoothing_kimg.
        Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0
    G_opt = tflib.Optimizer(name='TrainG', learning_rate=lrate_in, **G_opt_args)
    D_opt = tflib.Optimizer(name='TrainD', learning_rate=lrate_in, **D_opt_args)
    # One graph replica per GPU; gradients are aggregated by the Optimizer objects.
    for gpu in range(submit_config.num_gpus):
        with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
            G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
            D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
            lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)]
            reals, labels = training_set.get_minibatch_tf()
            reals = process_reals(reals, lod_in, mirror_augment, training_set.dynamic_range, drange_net)
            with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops):
                G_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **G_loss_args)
            with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops):
                D_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals, labels=labels, **D_loss_args)
            G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
            D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)
    G_train_op = G_opt.apply_updates()
    D_train_op = D_opt.apply_updates()
    Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)
    with tf.device('/gpu:0'):
        try:
            peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
        except tf.errors.NotFoundError:
            # Memory-stats op unavailable on this build; report 0 instead.
            peak_gpu_mem_op = tf.constant(0)
    print('Setting up snapshot image grid...')
    grid_size, grid_reals, grid_labels, grid_latents = misc.setup_snapshot_image_grid(G, training_set, **grid_args)
    sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args)
    grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus)
    print('Setting up run dir...')
    misc.save_image_grid(grid_reals, os.path.join(submit_config.run_dir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)
    misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % resume_kimg), drange=drange_net, grid_size=grid_size)
    summary_log = tf.summary.FileWriter(submit_config.run_dir)
    if save_tf_graph:
        summary_log.add_graph(tf.get_default_graph())
    if save_weight_histograms:
        G.setup_weight_histograms(); D.setup_weight_histograms()
    metrics = metric_base.MetricGroup(metric_arg_list)
    print('Training...\n')
    ctx.update('', cur_epoch=resume_kimg, max_epoch=total_kimg)
    maintenance_time = ctx.get_last_update_interval()
    cur_nimg = int(resume_kimg * 1000)
    cur_tick = 0
    tick_start_nimg = cur_nimg
    prev_lod = -1.0
    while cur_nimg < total_kimg * 1000:
        if ctx.should_stop(): break
        # Choose training parameters and configure training ops.
        sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args)
        training_set.configure(sched.minibatch // submit_config.num_gpus, sched.lod)
        if reset_opt_for_new_lod:
            # Reset optimizer moments whenever the integer LOD bracket changes.
            if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
                G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
        prev_lod = sched.lod
        # Run training ops.
        for _mb_repeat in range(minibatch_repeats):
            for _D_repeat in range(D_repeats):
                tflib.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch})
                cur_nimg += sched.minibatch
            tflib.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch})
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
            cur_tick += 1
            tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
            tick_start_nimg = cur_nimg
            tick_time = ctx.get_time_since_last_update()
            total_time = ctx.get_time_since_start() + resume_time
            # Report progress.
            print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %-4.1f' % (
                autosummary('Progress/tick', cur_tick),
                autosummary('Progress/kimg', cur_nimg / 1000.0),
                autosummary('Progress/lod', sched.lod),
                autosummary('Progress/minibatch', sched.minibatch),
                dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)),
                autosummary('Timing/sec_per_tick', tick_time),
                autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
                autosummary('Timing/maintenance_sec', maintenance_time),
                autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)))
            autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
            autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
            # Save snapshots.
            if cur_tick % image_snapshot_ticks == 0 or done:
                grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus)
                misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
            if cur_tick % network_snapshot_ticks == 0 or done or cur_tick == 1:
                pkl = os.path.join(submit_config.run_dir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000))
                misc.save_pkl((G, D, Gs), pkl)
                metrics.run(pkl, run_dir=submit_config.run_dir, num_gpus=submit_config.num_gpus, tf_config=tf_config)
            # Update summaries and RunContext.
            metrics.update_autosummaries()
            tflib.autosummary.save_summaries(summary_log, cur_nimg)
            ctx.update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg)
            maintenance_time = ctx.get_last_update_interval() - tick_time
    # Write final results.
    misc.save_pkl((G, D, Gs), os.path.join(submit_config.run_dir, 'network-final.pkl'))
    summary_log.close()
    ctx.close()
#----------------------------------------------------------------------------
| 16,910 | 54.084691 | 185 | py |
stylegan-encoder | stylegan-encoder-master/ffhq_dataset/landmarks_detector.py | import dlib
class LandmarksDetector:
    """Thin wrapper around dlib's frontal face detector + 68-point shape predictor."""

    def __init__(self, predictor_model_path):
        """
        :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
        """
        self.detector = dlib.get_frontal_face_detector() # cnn_face_detection_model_v1 also can be used
        self.shape_predictor = dlib.shape_predictor(predictor_model_path)

    def get_landmarks(self, image):
        """Yield one [(x, y), ...] list of 68 landmark points per face found in the image file.

        :param image: path to an image file readable by dlib.load_rgb_image.
        """
        img = dlib.load_rgb_image(image)
        dets = self.detector(img, 1) # upsample the image once to detect smaller faces
        for detection in dets:
            # Fix: narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit propagate instead of being swallowed; per-face failures
            # are still skipped best-effort, as before.
            try:
                face_landmarks = [(item.x, item.y) for item in self.shape_predictor(img, detection).parts()]
                yield face_landmarks
            except Exception:
                print("Exception in get_landmarks()!")
| 760 | 33.590909 | 108 | py |
stylegan-encoder | stylegan-encoder-master/ffhq_dataset/__init__.py | 0 | 0 | 0 | py | |
stylegan-encoder | stylegan-encoder-master/ffhq_dataset/face_alignment.py | import numpy as np
import scipy.ndimage
import os
import PIL.Image
def image_align(src_file, dst_file, face_landmarks, output_size=1024, transform_size=4096, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
    """Crop, rotate and rescale a face image so the eyes/mouth land at canonical positions.

    src_file: path to the in-the-wild source image.
    dst_file: path where the aligned PNG is written.
    face_landmarks: 68 (x, y) points as produced by LandmarksDetector.
    output_size/transform_size: final and intermediate resolutions.
    em_scale: shifts the crop center along the eye->mouth axis.
    alpha: when True, append a blend mask as an alpha channel.
    """
    # Align function from FFHQ dataset pre-processing step
    # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
    lm = np.array(face_landmarks)
    lm_chin = lm[0 : 17] # left-right
    lm_eyebrow_left = lm[17 : 22] # left-right
    lm_eyebrow_right = lm[22 : 27] # left-right
    lm_nose = lm[27 : 31] # top-down
    lm_nostrils = lm[31 : 36] # top-down
    lm_eye_left = lm[36 : 42] # left-clockwise
    lm_eye_right = lm[42 : 48] # left-clockwise
    lm_mouth_outer = lm[48 : 60] # left-clockwise
    lm_mouth_inner = lm[60 : 68] # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle: x is the horizontal axis of the face,
    # y its perpendicular; quad lists the four corners around center c.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    x *= x_scale
    y = np.flipud(x) * [-y_scale, y_scale]
    c = eye_avg + eye_to_mouth * em_scale
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # Load in-the-wild image.
    if not os.path.isfile(src_file):
        print('\nCannot find source image. Please run "--wilds" before "--align".')
        return
    img = PIL.Image.open(src_file).convert('RGBA').convert('RGB')

    # Shrink: pre-downscale large sources to roughly 2x the output size.
    # NOTE(review): PIL.Image.ANTIALIAS was removed in Pillow 10 — confirm the
    # pinned Pillow version, or migrate to Image.Resampling.LANCZOS.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop to the quad's bounding box plus a safety border.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad: when the quad extends past the image, reflect-pad and feather the
    # seam with a blur + median blend so the synthesized border looks natural.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        # mask ~ 1 near padded edges, 0 in the original interior.
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = np.uint8(np.clip(np.rint(img), 0, 255))
        if alpha:
            mask = 1-np.clip(3.0 * mask, 0.0, 1.0)
            mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))
            img = np.concatenate((img, mask), axis=2)
            img = PIL.Image.fromarray(img, 'RGBA')
        else:
            img = PIL.Image.fromarray(img, 'RGB')
        quad += pad[:2]

    # Transform: map the oriented quad onto an axis-aligned square.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    # Save aligned image.
    img.save(dst_file, 'PNG')
| 4,533 | 47.752688 | 169 | py |
stylegan-encoder | stylegan-encoder-master/metrics/frechet_inception_distance.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Frechet Inception Distance (FID)."""
import os
import numpy as np
import scipy
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
class FID(metric_base.MetricBase):
    """Frechet Inception Distance between real images and Gs samples."""

    def __init__(self, num_images, minibatch_per_gpu, **kwargs):
        # num_images: number of real and fake images used for the statistics.
        # minibatch_per_gpu: generation batch size per GPU.
        super().__init__(**kwargs)
        self.num_images = num_images
        self.minibatch_per_gpu = minibatch_per_gpu

    def _evaluate(self, Gs, num_gpus):
        """Compute FID for the given generator snapshot and report it."""
        minibatch_size = num_gpus * self.minibatch_per_gpu
        inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl
        activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
        # Calculate statistics for reals (cached on disk, keyed by dataset config).
        cache_file = self._get_cache_file_for_reals(num_images=self.num_images)
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        if os.path.isfile(cache_file):
            mu_real, sigma_real = misc.load_pkl(cache_file)
        else:
            for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):
                begin = idx * minibatch_size
                end = min(begin + minibatch_size, self.num_images)
                activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)
                if end == self.num_images:
                    break
            mu_real = np.mean(activations, axis=0)
            sigma_real = np.cov(activations, rowvar=False)
            misc.save_pkl((mu_real, sigma_real), cache_file)
        # Construct TensorFlow graph: per-GPU clone of Gs feeding the Inception features network.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device('/gpu:%d' % gpu_idx):
                Gs_clone = Gs.clone()
                inception_clone = inception.clone()
                latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
                images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
                images = tflib.convert_images_to_uint8(images)
                result_expr.append(inception_clone.get_output_for(images))
        # Calculate statistics for fakes.
        for begin in range(0, self.num_images, minibatch_size):
            end = min(begin + minibatch_size, self.num_images)
            activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
        mu_fake = np.mean(activations, axis=0)
        sigma_fake = np.cov(activations, rowvar=False)
        # Calculate FID: ||mu_f - mu_r||^2 + Tr(S_f + S_r - 2*sqrt(S_f S_r)).
        m = np.square(mu_fake - mu_real).sum()
        s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member
        dist = m + np.trace(sigma_fake + sigma_real - 2*s)
        # np.real drops any tiny imaginary component from the matrix square root.
        self._report_result(np.real(dist))
#----------------------------------------------------------------------------
| 3,335 | 44.69863 | 129 | py |
stylegan-encoder | stylegan-encoder-master/metrics/metric_base.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Common definitions for GAN metrics."""
import os
import time
import hashlib
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
from training import dataset
#----------------------------------------------------------------------------
# Standard metrics.
# Each preset is a kwargs dict consumed by MetricGroup via dnnlib.util.call_func_by_name.
fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
#----------------------------------------------------------------------------
# Base class for metrics.
class MetricBase:
    """Base class for GAN evaluation metrics.

    Subclasses implement `_evaluate(Gs, num_gpus)` and call `_report_result`
    one or more times; `run()` handles loading the network pickle, session
    setup, timing, and logging.
    """

    def __init__(self, name):
        # name: short identifier used in log lines and autosummary keys.
        self.name = name
        self._network_pkl = None
        self._dataset_args = None
        self._mirror_augment = None
        self._results = []       # list of EasyDict(value, suffix, fmt)
        self._eval_time = None   # wallclock seconds of the last run()

    def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
        """Evaluate the metric on a network pickle and optionally log the result."""
        self._network_pkl = network_pkl
        self._dataset_args = dataset_args
        self._mirror_augment = mirror_augment
        self._results = []
        # Fall back to the dataset config recorded for the previous run.
        if (dataset_args is None or mirror_augment is None) and run_dir is not None:
            run_config = misc.parse_config_for_previous_run(run_dir)
            self._dataset_args = dict(run_config['dataset'])
            self._dataset_args['shuffle_mb'] = 0
            self._mirror_augment = run_config['train'].get('mirror_augment', False)
        time_begin = time.time()
        # Fresh graph + session per evaluation so metrics don't pollute each other.
        with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
            _G, _D, Gs = misc.load_pkl(self._network_pkl)
            self._evaluate(Gs, num_gpus=num_gpus)
        self._eval_time = time.time() - time_begin
        if log_results:
            result_str = self.get_result_str()
            if run_dir is not None:
                log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
                with dnnlib.util.Logger(log, 'a'):
                    print(result_str)
            else:
                print(result_str)

    def get_result_str(self):
        """Format the last results as a single aligned log line."""
        network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
        if len(network_name) > 29:
            # Keep log columns aligned by truncating long snapshot names.
            network_name = '...' + network_name[-26:]
        result_str = '%-30s' % network_name
        result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
        for res in self._results:
            result_str += ' ' + self.name + res.suffix + ' '
            result_str += res.fmt % res.value
        return result_str

    def update_autosummaries(self):
        """Push the last results into TensorBoard autosummaries."""
        for res in self._results:
            tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)

    def _evaluate(self, Gs, num_gpus):
        raise NotImplementedError # to be overridden by subclasses

    def _report_result(self, value, suffix='', fmt='%-10.4f'):
        # Record one scalar result; suffix distinguishes sub-results of a metric.
        self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]

    def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
        """Build a cache path keyed by the metric + dataset configuration hash."""
        all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
        all_args.update(self._dataset_args)
        all_args.update(kwargs)
        md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
        dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
        return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))

    def _iterate_reals(self, minibatch_size):
        """Yield minibatches of real images (endless generator)."""
        dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
        while True:
            images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
            if self._mirror_augment:
                images = misc.apply_mirror_augment(images)
            yield images

    def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
        """Yield minibatches of generated uint8 NHWC images (endless generator)."""
        while True:
            latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
            fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
            images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
            yield images
#----------------------------------------------------------------------------
# Group of multiple metrics.
class MetricGroup:
    """Convenience wrapper that runs and reports several metrics as one unit."""

    def __init__(self, metric_kwarg_list):
        # Each entry supplies 'func_name' plus constructor kwargs for one metric.
        self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]

    def run(self, *args, **kwargs):
        """Evaluate every metric in the group with identical arguments."""
        for metric in self.metrics:
            metric.run(*args, **kwargs)

    def get_result_str(self):
        """Concatenate the result strings of all metrics, space-separated."""
        return ' '.join(metric.get_result_str() for metric in self.metrics)

    def update_autosummaries(self):
        """Forward autosummary updates to every metric."""
        for metric in self.metrics:
            metric.update_autosummaries()
#----------------------------------------------------------------------------
# Dummy metric for debugging purposes.
class DummyMetric(MetricBase):
    """Metric that always reports 0.0; useful for exercising the metric plumbing."""

    def _evaluate(self, Gs, num_gpus):
        _ = Gs, num_gpus # unused; signature dictated by MetricBase
        self._report_result(0.0)
#----------------------------------------------------------------------------
| 6,479 | 44.314685 | 177 | py |
stylegan-encoder | stylegan-encoder-master/metrics/perceptual_path_length.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Perceptual Path Length (PPL)."""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
# Normalize batch of vectors.
def normalize(v):
    """Normalize a batch of vectors to unit L2 norm along the last axis."""
    return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True))
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
    """Spherical interpolation between batches of vectors `a` and `b` at fraction `t`."""
    a = normalize(a)
    b = normalize(b)
    d = tf.reduce_sum(a * b, axis=-1, keepdims=True)  # cosine of the angle between a and b
    p = t * tf.math.acos(d)                           # interpolated angle
    c = normalize(b - d * a)                          # component of b orthogonal to a
    d = a * tf.math.cos(p) + c * tf.math.sin(p)
    return normalize(d)
#----------------------------------------------------------------------------
class PPL(metric_base.MetricBase):
    """Perceptual Path Length (PPL) metric.

    Measures the expected LPIPS distance between pairs of images generated
    from latent codes a small step `epsilon` apart, in either Z or W space.
    """

    def __init__(self, num_samples, epsilon, space, sampling, minibatch_per_gpu, **kwargs):
        # space: 'z' = input latent space, 'w' = intermediate latent space.
        # sampling: 'full' = t ~ U(0,1) along the path, 'end' = endpoints only (t = 0).
        assert space in ['z', 'w']
        assert sampling in ['full', 'end']
        super().__init__(**kwargs)
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.space = space
        self.sampling = sampling
        self.minibatch_per_gpu = minibatch_per_gpu

    def _evaluate(self, Gs, num_gpus):
        minibatch_size = num_gpus * self.minibatch_per_gpu

        # Construct TensorFlow graph.
        distance_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device('/gpu:%d' % gpu_idx):
                Gs_clone = Gs.clone()
                noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]

                # Generate random latents and interpolation t-values.
                # Latents come in interleaved pairs (even = t0, odd = t1).
                lat_t01 = tf.random_normal([self.minibatch_per_gpu * 2] + Gs_clone.input_shape[1:])
                lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0)

                # Interpolate in W or Z.
                if self.space == 'w':
                    dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, None, is_validation=True)
                    dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2]
                    dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis])
                    dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon)
                    dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape)
                else: # space == 'z'
                    lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2]
                    lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis])
                    lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon)
                    lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape)
                    dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, None, is_validation=True)

                # Synthesize images.
                with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch
                    images = Gs_clone.components.synthesis.get_output_for(dlat_e01, is_validation=True, randomize_noise=False)

                # Crop only the face region.
                c = int(images.shape[2] // 8)
                images = images[:, :, c*3 : c*7, c*2 : c*6]

                # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
                if images.shape[2] > 256:
                    factor = images.shape[2] // 256
                    images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
                    images = tf.reduce_mean(images, axis=[3,5])

                # Scale dynamic range from [-1,1] to [0,255] for VGG.
                images = (images + 1) * (255 / 2)

                # Evaluate perceptual distance.
                img_e0, img_e1 = images[0::2], images[1::2]
                distance_measure = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2') # vgg16_zhang_perceptual.pkl
                # Normalize by epsilon^2 so the result approximates a squared path derivative.
                distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2))

        # Sampling loop.
        all_distances = []
        for _ in range(0, self.num_samples, minibatch_size):
            all_distances += tflib.run(distance_expr)
        all_distances = np.concatenate(all_distances, axis=0)

        # Reject outliers.
        lo = np.percentile(all_distances, 1, interpolation='lower')
        hi = np.percentile(all_distances, 99, interpolation='higher')
        filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances)
        self._report_result(np.mean(filtered_distances))
#----------------------------------------------------------------------------
| 5,225 | 46.944954 | 145 | py |
stylegan-encoder | stylegan-encoder-master/metrics/linear_separability.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Linear Separability (LS)."""
from collections import defaultdict
import numpy as np
import sklearn.svm
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
classifier_urls = [
'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl
'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl
'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl
'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl
'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl
'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl
'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl
'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl
'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl
'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl
'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl
'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl
'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl
'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl
'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl
'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl
'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl
'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl
'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl
'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl
'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl
'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl
'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl
'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl
'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl
'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl
'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl
'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl
'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl
'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl
'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl
'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl
'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl
'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl
'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl
'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl
'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl
'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl
'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl
'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl
]
#----------------------------------------------------------------------------
def prob_normalize(p):
    """Convert a 2-D array-like of non-negative weights into a joint probability table summing to 1."""
    table = np.asarray(p).astype(np.float32)
    assert len(table.shape) == 2
    total = np.sum(table)
    return table / total
def mutual_information(p):
    """Mutual information I(X;Y) in bits for a 2-D joint probability table (normalized first)."""
    p = prob_normalize(p)
    px = np.sum(p, axis=1)  # marginal over axis 0
    py = np.sum(p, axis=0)  # marginal over axis 1
    result = 0.0
    for x in range(p.shape[0]):
        for y in range(p.shape[1]):
            p_xy = p[x][y]
            # Zero cells contribute nothing (and would break log2).
            if p_xy > 0.0:
                result += p_xy * np.log2(p_xy / (px[x] * py[y])) # get bits as output
    return result
def entropy(p):
    """Shannon entropy in bits of a 2-D probability table (normalized first)."""
    p = prob_normalize(p)
    result = 0.0
    for row in p:
        for p_xy in row:
            # Skip zero cells; lim p->0 of p*log2(p) is 0.
            if p_xy > 0.0:
                result -= p_xy * np.log2(p_xy)
    return result
def conditional_entropy(p):
    # H(Y|X) where X corresponds to axis 0, Y to axis 1
    # i.e., How many bits of additional information are needed to where we are on axis 1 if we know where we are on axis 0?
    p = prob_normalize(p)
    y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y)
    # Uses the identity H(Y|X) = H(Y) - I(X;Y).
    return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up.
#----------------------------------------------------------------------------
class LS(metric_base.MetricBase):
    """Linear Separability (LS) metric.

    Generates images, labels them with pretrained attribute classifiers,
    fits a linear SVM in both Z and W space, and reports (per space) two to
    the power of the summed conditional entropies — lower is more separable.
    """

    def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs):
        # num_keep: how many of the most confidently classified samples to keep for SVM fitting.
        assert num_keep <= num_samples
        super().__init__(**kwargs)
        self.num_samples = num_samples
        self.num_keep = num_keep
        self.attrib_indices = attrib_indices
        self.minibatch_per_gpu = minibatch_per_gpu

    def _evaluate(self, Gs, num_gpus):
        minibatch_size = num_gpus * self.minibatch_per_gpu

        # Construct TensorFlow graph for each GPU.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device('/gpu:%d' % gpu_idx):
                Gs_clone = Gs.clone()

                # Generate images.
                latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
                dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True)
                images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True)

                # Downsample to 256x256. The attribute classifiers were built for 256x256.
                if images.shape[2] > 256:
                    factor = images.shape[2] // 256
                    images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
                    images = tf.reduce_mean(images, axis=[3, 5])

                # Run classifier for each attribute.
                # dlatents[:,-1] keeps one W vector per sample (all layers share it at generation time here).
                result_dict = dict(latents=latents, dlatents=dlatents[:,-1])
                for attrib_idx in self.attrib_indices:
                    classifier = misc.load_pkl(classifier_urls[attrib_idx])
                    logits = classifier.get_output_for(images, None)
                    # Two-class softmax over (logit, -logit).
                    predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1))
                    result_dict[attrib_idx] = predictions
                result_expr.append(result_dict)

        # Sampling loop.
        results = []
        for _ in range(0, self.num_samples, minibatch_size):
            results += tflib.run(result_expr)
        results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()}

        # Calculate conditional entropy for each attribute.
        conditional_entropies = defaultdict(list)
        for attrib_idx in self.attrib_indices:
            # Prune the least confident samples.
            pruned_indices = list(range(self.num_samples))
            pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
            pruned_indices = pruned_indices[:self.num_keep]

            # Fit SVM to the remaining samples.
            svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1)
            for space in ['latents', 'dlatents']:
                svm_inputs = results[space][pruned_indices]
                try:
                    svm = sklearn.svm.LinearSVC()
                    svm.fit(svm_inputs, svm_targets)
                    svm.score(svm_inputs, svm_targets)
                    svm_outputs = svm.predict(svm_inputs)
                except:
                    svm_outputs = svm_targets # assume perfect prediction

                # Calculate conditional entropy.
                # p is the 2x2 joint table of (svm prediction, classifier label).
                p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)]
                conditional_entropies[space].append(conditional_entropy(p))

        # Calculate separability scores.
        scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()}
        self._report_result(scores['latents'], suffix='_z')
        self._report_result(scores['dlatents'], suffix='_w')
#----------------------------------------------------------------------------
| 10,313 | 56.94382 | 140 | py |
stylegan-encoder | stylegan-encoder-master/metrics/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
# empty
| 350 | 38 | 76 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/util.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous utility classes and functions."""
import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
# Util classes
# ------------------------------------------------------------------------------------------
class EasyDict(dict):
    """Convenience class that behaves like a dict but allows access with the attribute syntax."""

    def __getattr__(self, name: str) -> Any:
        # Missing keys must surface as AttributeError so hasattr()/getattr() work as expected.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
class Logger(object):
    """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""

    def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
        self.file = None

        if file_name is not None:
            self.file = open(file_name, file_mode)

        self.should_flush = should_flush
        # Keep references to the real streams so close() can restore them.
        self.stdout = sys.stdout
        self.stderr = sys.stderr

        # Install self as the process-wide stdout and stderr.
        sys.stdout = self
        sys.stderr = self

    def __enter__(self) -> "Logger":
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def write(self, text: str) -> None:
        """Write text to stdout (and a file) and optionally flush."""
        if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
            return

        if self.file is not None:
            self.file.write(text)

        self.stdout.write(text)

        if self.should_flush:
            self.flush()

    def flush(self) -> None:
        """Flush written text to both stdout and a file, if open."""
        if self.file is not None:
            self.file.flush()

        self.stdout.flush()

    def close(self) -> None:
        """Flush, close possible files, and remove stdout/stderr mirroring."""
        self.flush()

        # if using multiple loggers, prevent closing in wrong order
        if sys.stdout is self:
            sys.stdout = self.stdout
        if sys.stderr is self:
            sys.stderr = self.stderr

        if self.file is not None:
            self.file.close()
# Small util functions
# ------------------------------------------------------------------------------------------
def format_time(seconds: Union[int, float]) -> str:
    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
    s = int(np.rint(seconds))
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60

    if s < minute:
        return "{0}s".format(s)
    if s < hour:
        return "{0}m {1:02}s".format(s // minute, s % minute)
    if s < day:
        return "{0}h {1:02}m {2:02}s".format(s // hour, (s // minute) % 60, s % minute)
    # Seconds are dropped once the duration reaches a full day.
    return "{0}d {1:02}h {2:02}m".format(s // day, (s // hour) % 24, (s // minute) % 60)
def ask_yes_no(question: str) -> bool:
    """Ask the user the question until the user inputs a valid answer.

    Accepts the same spellings as the former distutils.util.strtobool:
    y/yes/t/true/on/1 and n/no/f/false/off/0 (case-insensitive).
    Returns True for an affirmative answer, False for a negative one.
    """
    # NOTE: distutils (and strtobool) was deprecated by PEP 632 and removed
    # in Python 3.12, so the parsing is done inline instead.
    truthy = ("y", "yes", "t", "true", "on", "1")
    falsy = ("n", "no", "f", "false", "off", "0")
    while True:
        print("{0} [y/n]".format(question))
        answer = input().lower()
        if answer in truthy:
            return True
        if answer in falsy:
            return False
        # Anything else is invalid: re-ask (mirrors the original ValueError retry loop).
def tuple_product(t: Tuple) -> Any:
    """Calculate the product of the tuple elements.

    Returns 1 (the multiplicative identity) for an empty tuple.
    """
    product = 1
    for factor in t:
        product *= factor
    return product
# Mapping from numpy-style dtype names to ctypes types of identical byte size.
_str_to_ctype = {
    "uint8": ctypes.c_ubyte,
    "uint16": ctypes.c_uint16,
    "uint32": ctypes.c_uint32,
    "uint64": ctypes.c_uint64,
    "int8": ctypes.c_byte,
    "int16": ctypes.c_int16,
    "int32": ctypes.c_int32,
    "int64": ctypes.c_int64,
    "float32": ctypes.c_float,
    "float64": ctypes.c_double
}
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
    """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
    # Resolve the type name: accept a plain string, then __name__, then name.
    if isinstance(type_obj, str):
        name = type_obj
    elif hasattr(type_obj, "__name__"):
        name = type_obj.__name__
    elif hasattr(type_obj, "name"):
        name = type_obj.name
    else:
        raise RuntimeError("Cannot infer type name from input")

    assert name in _str_to_ctype.keys()
    dtype = np.dtype(name)
    ctype = _str_to_ctype[name]

    # Sanity check: both representations must occupy the same number of bytes.
    assert dtype.itemsize == ctypes.sizeof(ctype)
    return dtype, ctype
def is_pickleable(obj: Any) -> bool:
    """Return True if *obj* can be serialized with pickle, False otherwise."""
    try:
        with io.BytesIO() as stream:
            pickle.dump(obj, stream)
        return True
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # every pickling failure derives from Exception.
        return False
# Functionality to import modules/objects by name, and call functions by name
# ------------------------------------------------------------------------------------------
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
    """Searches for the underlying module behind the name to some python object.
    Returns the module and the object name (original name with module part removed)."""

    # allow convenience shorthands, substitute them by full names
    # (the dot is escaped: the old pattern "^np." also rewrote names like "npx.foo")
    obj_name = re.sub(r"^np\.", "numpy.", obj_name)
    obj_name = re.sub(r"^tf\.", "tensorflow.", obj_name)

    # list alternatives for (module_name, local_obj_name), longest module path first
    parts = obj_name.split(".")
    name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]

    # try each alternative in turn
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name) # may raise ImportError
            get_obj_from_module(module, local_obj_name) # may raise AttributeError
            return module, local_obj_name
        except:
            pass

    # maybe some of the modules themselves contain errors?
    for module_name, _local_obj_name in name_pairs:
        try:
            importlib.import_module(module_name) # may raise ImportError
        except ImportError:
            # Re-raise only genuine errors inside the module, not "module not found".
            if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
                raise

    # maybe the requested attribute is missing?
    for module_name, local_obj_name in name_pairs:
        try:
            module = importlib.import_module(module_name) # may raise ImportError
            get_obj_from_module(module, local_obj_name) # may raise AttributeError
        except ImportError:
            pass

    # we are out of luck, but we have no idea why
    raise ImportError(obj_name)
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
    """Traverses the object name and returns the last (rightmost) python object."""
    # An empty name refers to the module itself.
    if obj_name == '':
        return module
    target = module
    for attr in obj_name.split("."):
        target = getattr(target, attr)
    return target
def get_obj_by_name(name: str) -> Any:
    """Finds the python object with the given fully-qualified name (module path + attribute path)."""
    module, obj_name = get_module_from_obj_name(name)
    return get_obj_from_module(module, obj_name)
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
    """Finds the python object with the given name and calls it as a function.

    `func_name` is keyword-only; all other args/kwargs are forwarded to the callee.
    """
    assert func_name is not None
    func_obj = get_obj_by_name(func_name)
    assert callable(func_obj)
    return func_obj(*args, **kwargs)
def get_module_dir_by_obj_name(obj_name: str) -> str:
    """Get the directory path of the module containing the given object name."""
    module, _ = get_module_from_obj_name(obj_name)
    return os.path.dirname(inspect.getfile(module))
def is_top_level_function(obj: Any) -> bool:
    """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
    if not callable(obj):
        return False
    # A top-level function is bound under its own name in its defining module.
    owning_module = sys.modules[obj.__module__]
    return obj.__name__ in owning_module.__dict__
def get_top_level_function_name(obj: Any) -> str:
    """Return the fully-qualified name ("module.function") of a top-level function."""
    assert is_top_level_function(obj)
    return obj.__module__ + "." + obj.__name__
# File system helpers
# ------------------------------------------------------------------------------------------
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
    """List all files recursively in a given directory while ignoring given file and directory names.
    Returns list of tuples containing both absolute and relative paths.

    `ignores` entries are fnmatch-style patterns applied to both directory and
    file basenames; matching directories are pruned from the walk entirely.
    """
    assert os.path.isdir(dir_path)
    base_name = os.path.basename(os.path.normpath(dir_path))

    if ignores is None:
        ignores = []

    result = []

    for root, dirs, files in os.walk(dir_path, topdown=True):
        for ignore_ in ignores:
            dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]

            # dirs need to be edited in-place (os.walk re-reads it to prune the traversal)
            for d in dirs_to_remove:
                dirs.remove(d)

            files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]

        absolute_paths = [os.path.join(root, f) for f in files]
        relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]

        # Optionally prefix relative paths with the directory's own base name.
        if add_base_to_relative:
            relative_paths = [os.path.join(base_name, p) for p in relative_paths]

        assert len(absolute_paths) == len(relative_paths)
        result += zip(absolute_paths, relative_paths)

    return result
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
    """Takes in a list of tuples of (src, dst) paths and copies files.
    Will create all necessary directories."""
    for source_path, target_path in files:
        target_dir_name = os.path.dirname(target_path)

        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        # Skip when the destination has no directory component (makedirs('') raises).
        if target_dir_name:
            os.makedirs(target_dir_name, exist_ok=True)

        shutil.copyfile(source_path, target_path)
# URL helpers
# ------------------------------------------------------------------------------------------
def is_url(obj: Any) -> bool:
    """Determine whether the given object is a valid URL string.

    Requires a scheme, a netloc containing a dot, and "://" in the string.
    """
    # requests.compat.urlparse is just an alias of the stdlib function in
    # Python 3, so parse with urllib directly and avoid the heavyweight import.
    from urllib.parse import urljoin, urlparse

    if not isinstance(obj, str) or not "://" in obj:
        return False
    try:
        res = urlparse(obj)
        if not res.scheme or not res.netloc or not "." in res.netloc:
            return False
        # Also validate the URL resolved against its own root.
        res = urlparse(urljoin(obj, "/"))
        if not res.scheme or not res.netloc or not "." in res.netloc:
            return False
    except Exception:
        return False
    return True
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any:
    """Download the given URL and return a binary-mode file object to access the data.

    Local file paths are opened directly. If `cache_dir` is given, a hit is
    served from cache and a successful download is written back to it.
    """
    if not is_url(url) and os.path.isfile(url):
        return open(url, 'rb')
    assert is_url(url)
    assert num_attempts >= 1

    # Lookup from cache.
    url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
    if cache_dir is not None:
        cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
        if len(cache_files) == 1:
            return open(cache_files[0], "rb")

    # Download.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print("Downloading %s ..." % url, end="", flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if len(res.content) == 0:
                        raise IOError("No data received")

                    # Small responses may be Google Drive interstitial pages
                    # rather than the actual payload.
                    if len(res.content) < 8192:
                        content_str = res.content.decode("utf-8")
                        if "download_warning" in res.headers.get("Set-Cookie", ""):
                            links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
                            if len(links) == 1:
                                # Switch to the confirmed download link and retry.
                                url = requests.compat.urljoin(url, links[0])
                                raise IOError("Google Drive virus checker nag")
                        if "Google Drive - Quota exceeded" in content_str:
                            raise IOError("Google Drive quota exceeded")

                    match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
                    url_name = match[1] if match else url
                    url_data = res.content
                    if verbose:
                        print(" done")
                    break
            except:
                # Out of retries: report failure and propagate the error.
                if not attempts_left:
                    if verbose:
                        print(" failed")
                    raise
                if verbose:
                    print(".", end="", flush=True)

    # Save to cache.
    if cache_dir is not None:
        safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
        cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
        # Write to a unique temp file first so concurrent callers never see a partial file.
        temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, "wb") as f:
            f.write(url_data)
        os.replace(temp_file, cache_file) # atomic

    # Return data as file object.
    return io.BytesIO(url_data)
| 13,836 | 32.831296 | 151 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import submission
from .submission.run_context import RunContext
from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import get_path_from_template
from .submission.submit import submit_run
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
| 797 | 37 | 126 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/tflib/tfutil.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous helper utils for Tensorflow."""
import os
import numpy as np
import tensorflow as tf
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
    """Run the specified ops in the default session.

    Thin wrapper around tf.Session.run(); all arguments are forwarded as-is.
    """
    assert_tf_initialized()
    return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
    """Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
    return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
    """Convert a Tensorflow shape to a list of ints."""
    # Unknown dimensions come through as None.
    return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
    """Shortcut function for flattening a tensor.

    Collapses all dimensions into a single 1-D tensor.
    """
    with tf.name_scope("Flatten"):
        return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
    """Logarithm in base 2."""
    with tf.name_scope("Log2"):
        # log2(x) = ln(x) / ln(2); the constant factor is precomputed in float32.
        return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
    """Exponent in base 2."""
    with tf.name_scope("Exp2"):
        # 2**x = e**(x * ln(2)); the constant factor is precomputed in float32.
        return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
    """Linear interpolation: returns a at t=0 and b at t=1 (t is not clamped)."""
    with tf.name_scope("Lerp"):
        return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
    """Linear interpolation with clip (t is clamped to [0, 1] first)."""
    with tf.name_scope("LerpClip"):
        return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
    """Forcefully enter the specified name scope, ignoring any surrounding scopes."""
    # The trailing "/" makes TensorFlow interpret the scope as absolute.
    return tf.name_scope(scope + "/")
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
    """Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
    # Passing a pre-built VariableScope object bypasses the scope-nesting logic.
    return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
def _sanitize_tf_config(config_dict: dict = None) -> dict:
# Defaults.
cfg = dict()
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
# User overrides.
if config_dict is not None:
cfg.update(config_dict)
return cfg
def init_tf(config_dict: dict = None) -> None:
    """Initialize TensorFlow session using good default settings."""
    # Skip if already initialized.
    if tf.get_default_session() is not None:
        return

    # Setup config dict and random seeds.
    cfg = _sanitize_tf_config(config_dict)
    np_random_seed = cfg["rnd.np_random_seed"]
    if np_random_seed is not None:
        np.random.seed(np_random_seed)
    tf_random_seed = cfg["rnd.tf_random_seed"]
    if tf_random_seed == "auto":
        # Derive the TF seed from NumPy's random state.
        tf_random_seed = np.random.randint(1 << 31)
    if tf_random_seed is not None:
        tf.set_random_seed(tf_random_seed)

    # Setup environment variables ("env.<NAME>" keys).
    for key, value in list(cfg.items()):
        fields = key.split(".")
        if fields[0] == "env":
            assert len(fields) == 2
            os.environ[fields[1]] = str(value)

    # Create default TensorFlow session.
    create_session(cfg, force_as_default=True)
def assert_tf_initialized():
    """Check that TensorFlow session has been initialized; raise RuntimeError otherwise."""
    if tf.get_default_session() is None:
        raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
    """Create tf.Session based on config dict.

    Args:
        config_dict: Optional overrides merged over _sanitize_tf_config() defaults.
            Dotted keys such as "gpu_options.allow_growth" are applied to the
            corresponding tf.ConfigProto sub-message.
        force_as_default: If True, permanently enter the session so it becomes
            the process-wide default without a surrounding "with" block.

    Returns:
        The newly created tf.Session.
    """
    # Setup TensorFlow config proto.
    cfg = _sanitize_tf_config(config_dict)
    config_proto = tf.ConfigProto()
    for key, value in cfg.items():
        fields = key.split(".")
        if fields[0] not in ["rnd", "env"]:  # "rnd.*" and "env.*" are handled by init_tf(), not ConfigProto.
            obj = config_proto
            for field in fields[:-1]:  # walk down nested proto messages, e.g. "gpu_options"
                obj = getattr(obj, field)
            setattr(obj, fields[-1], value)

    # Create session.
    session = tf.Session(config=config_proto)
    if force_as_default:
        # pylint: disable=protected-access
        # NOTE(review): reaches into TF internals to keep the as_default() context
        # manager open forever; enforce_nesting=False permits never exiting it.
        session._default_session = session.as_default()
        session._default_session.enforce_nesting = False
        session._default_session.__enter__()  # pylint: disable=no-member
    return session
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
    """Initialize all tf.Variables that have not already been initialized.

    Equivalent to the following, but more efficient and does not bloat the tf graph:
    tf.variables_initializer(tf.report_uninitialized_variables()).run()

    Args:
        target_vars: Variables to consider; defaults to all global variables.
    """
    assert_tf_initialized()
    if target_vars is None:
        target_vars = tf.global_variables()

    test_vars = []  # variables whose init status must be queried at runtime
    test_ops = []   # matching tf.is_variable_initialized ops, one per entry of test_vars

    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in target_vars:
            assert is_tf_expression(var)

            try:
                # If an IsVariableInitialized op already exists for this variable,
                # a previous call has handled it => skip to avoid graph bloat.
                tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
            except KeyError:
                # Op does not exist => variable may be uninitialized.
                test_vars.append(var)

                with absolute_name_scope(var.name.split(":")[0]):
                    test_ops.append(tf.is_variable_initialized(var))

    # Query all statuses in a single session run, then initialize only the stale ones.
    init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
    run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
    """Set the values of given tf.Variables.

    Equivalent to the following, but more efficient and does not bloat the tf graph:
    tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])

    Args:
        var_to_value_dict: Mapping of tf.Variable => new value (NumPy array or scalar).
    """
    assert_tf_initialized()
    ops = []
    feed_dict = {}

    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)

        try:
            setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0"))  # look for existing op
        except KeyError:
            with absolute_name_scope(var.name.split(":")[0]):
                with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter")  # create new setter

        ops.append(setter)
        # The setter's second input is its "new_value" placeholder; feed the value through it.
        feed_dict[setter.op.inputs[1]] = value

    # Apply all assignments in one session run.
    run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
    """Create tf.Variable with large initial value without bloating the tf graph.

    The variable is declared with a cheap all-zeros initializer and the real
    value is then uploaded via a feed-dict based setter, so the large constant
    never becomes a graph node.
    """
    assert_tf_initialized()
    assert isinstance(initial_value, np.ndarray)

    zero_init = tf.zeros(initial_value.shape, initial_value.dtype)
    variable = tf.Variable(zero_init, *args, **kwargs)
    set_vars({variable: initial_value})
    return variable
def convert_images_from_uint8(images, drange=(-1, 1), nhwc_to_nchw=False):
    """Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
    Can be used as an input transformation for Network.run().

    Args:
        images: Image tensor with values in [0, 255].
        drange: Target dynamic range (low, high) after conversion.
        nhwc_to_nchw: If True, also transpose from NHWC to NCHW layout.

    Returns:
        float32 tensor with values mapped linearly from [0, 255] to drange.
    """
    images = tf.cast(images, tf.float32)
    if nhwc_to_nchw:
        images = tf.transpose(images, [0, 3, 1, 2])
    # BUGFIX: the original computed (images - drange[0]) * scale, which for the
    # default drange=(-1, 1) maps [0, 255] to ~[0.008, 2.008] instead of [-1, 1]
    # and is not the inverse of convert_images_to_uint8(). Map 0 -> drange[0]
    # and 255 -> drange[1] instead.
    return images * ((drange[1] - drange[0]) / 255) + drange[0]
def convert_images_to_uint8(images, drange=(-1, 1), nchw_to_nhwc=False, shrink=1, uint8_cast=True):
    """Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
    Can be used as an output transformation for Network.run().

    Args:
        images: Image tensor with values in drange.
        drange: Source dynamic range (low, high) of the input.
        nchw_to_nhwc: If True, also transpose from NCHW to NHWC layout.
        shrink: Integer downscale factor applied via average pooling (1 = no-op).
        uint8_cast: If True, saturate-cast the result to uint8; otherwise return float32.

    Returns:
        Tensor with values mapped linearly from drange to [0, 255].
    """
    images = tf.cast(images, tf.float32)
    if shrink > 1:
        # NOTE(review): the pooling assumes NCHW input here (it runs before any
        # nchw_to_nhwc transpose) -- confirm callers pass NCHW when shrink > 1.
        ksize = [1, 1, shrink, shrink]
        images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
    if nchw_to_nhwc:
        images = tf.transpose(images, [0, 2, 3, 1])
    scale = 255 / (drange[1] - drange[0])
    # The +0.5 bias makes the subsequent cast round-to-nearest instead of truncate.
    images = images * scale + (0.5 - drange[0] * scale)
    if uint8_cast:
        images = tf.saturate_cast(images, tf.uint8)
    return images
| 9,325 | 37.378601 | 173 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/tflib/autosummary.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for adding automatically tracked values to Tensorboard.
Autosummary creates an identity op that internally keeps track of the input
values and automatically shows up in TensorBoard. The reported value
represents an average over input components. The average is accumulated
constantly over time and flushed when save_summaries() is called.
Notes:
- The output tensor must be used as an input for something else in the
graph. Otherwise, the autosummary op will not get executed, and the average
value will not get accumulated.
- It is perfectly fine to include autosummaries with the same name in
several places throughout the graph, even if they are executed concurrently.
- It is ok to also pass in a python scalar or numpy array. In this case, it
is added to the average immediately.
"""
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
from . import tfutil
from .tfutil import TfExpression
from .tfutil import TfExpressionEx
# Module-level accumulator state shared by all autosummaries in the process.
_dtype = tf.float64        # accumulate moments in float64 for numerical stability
_vars = OrderedDict()      # name => [var, ...]; each var holds [sum(1), sum(x), sum(x**2)]
_immediate = OrderedDict() # name => (update_op, update_value placeholder) for python-scalar inputs
_finalized = False         # set by finalize_autosummaries(); no new accumulators afterwards
_merge_op = None           # cached tf.summary.merge_all() op, built on first save_summaries()
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
    """Internal helper for creating autosummary accumulators.

    Builds (or extends) a per-summary moment variable holding
    [count, sum(x), sum(x**2)] and returns the op that folds value_expr into it.
    """
    assert not _finalized  # no new accumulators once finalize_autosummaries() ran
    name_id = name.replace("/", "_")
    v = tf.cast(value_expr, _dtype)

    if v.shape.is_fully_defined():
        size = np.prod(tfutil.shape_to_list(v.shape))
        size_expr = tf.constant(size, dtype=_dtype)
    else:
        size = None  # element count only known at runtime
        size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))

    if size == 1:
        if v.shape.ndims != 0:
            v = tf.reshape(v, [])  # single element => store as scalar, no reduction needed
        v = [size_expr, v, tf.square(v)]
    else:
        v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
    # If the sum is non-finite (NaN/Inf), accumulate zeros so one bad step
    # does not poison the running moments.
    v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))

    with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False)  # [sum(1), sum(x), sum(x**2)]
        # First update after (re)initialization assigns; later updates accumulate.
        update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))

    # The same summary name may be created in several graph locations; keep them all.
    if name in _vars:
        _vars[name].append(var)
    else:
        _vars[name] = [var]
    return update_op
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx:
    """Create a new autosummary.

    Args:
        name:     Name to use in TensorBoard
        value:    TensorFlow expression or python value to track
        passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.

    Example use of the passthru mechanism:

    n = autosummary('l2loss', loss, passthru=n)

    This is a shorthand for the following code:

    with tf.control_dependencies([autosummary('l2loss', loss)]):
        n = tf.identity(n)
    """
    tfutil.assert_tf_initialized()
    name_id = name.replace("/", "_")

    if tfutil.is_tf_expression(value):
        # Graph value: hook the accumulator update onto the returned tensor so it
        # executes whenever the tensor is evaluated.
        with tf.name_scope("summary_" + name_id), tf.device(value.device):
            update_op = _create_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value if passthru is None else passthru)

    else:  # python scalar or numpy array
        # Immediate value: lazily create one placeholder-fed accumulator per name
        # and run the update right away.
        if name not in _immediate:
            with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(_dtype)
                update_op = _create_var(name, update_value)
                _immediate[name] = update_op, update_value

        update_op, update_value = _immediate[name]
        tfutil.run(update_op, {update_value: value})
        return value if passthru is None else passthru
def finalize_autosummaries() -> None:
    """Create the necessary ops to include autosummaries in TensorBoard report.

    Note: This should be done only once per graph.

    Returns a custom_scalar layout summary on the first call, None on later calls.
    (NOTE(review): the "-> None" annotation does not match the returned layout proto.)
    """
    global _finalized
    tfutil.assert_tf_initialized()

    if _finalized:
        return None

    _finalized = True
    tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])

    # Create summary ops.
    with tf.device(None), tf.control_dependencies(None):
        for name, vars_list in _vars.items():
            name_id = name.replace("/", "_")
            with tfutil.absolute_name_scope("Autosummary/" + name_id):
                # Combine the per-location accumulators and normalize by the count
                # so moments[1] = mean(x) and moments[2] = mean(x**2).
                moments = tf.add_n(vars_list)
                moments /= moments[0]
                with tf.control_dependencies([moments]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        mean = moments[1]
                        std = tf.sqrt(moments[2] - tf.square(moments[1]))
                        tf.summary.scalar(name, mean)
                        # Extra mean +/- std series feed the custom_scalar margin charts below.
                        tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
                        tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)

    # Group by category and chart name.
    cat_dict = OrderedDict()
    for series_name in sorted(_vars.keys()):
        p = series_name.split("/")
        cat = p[0] if len(p) >= 2 else ""
        chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
        if cat not in cat_dict:
            cat_dict[cat] = OrderedDict()
        if chart not in cat_dict[cat]:
            cat_dict[cat][chart] = []
        cat_dict[cat][chart].append(series_name)

    # Setup custom_scalar layout.
    categories = []
    for cat_name, chart_dict in cat_dict.items():
        charts = []
        for chart_name, series_names in chart_dict.items():
            series = []
            for series_name in series_names:
                series.append(layout_pb2.MarginChartContent.Series(
                    value=series_name,
                    lower="xCustomScalars/" + series_name + "/margin_lo",
                    upper="xCustomScalars/" + series_name + "/margin_hi"))
            margin = layout_pb2.MarginChartContent(series=series)
            charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
        categories.append(layout_pb2.Category(title=cat_name, chart=charts))
    layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
    return layout
def save_summaries(file_writer, global_step=None):
    """Call FileWriter.add_summary() with all summaries in the default graph,
    automatically finalizing and merging them on the first call.

    Args:
        file_writer: tf.summary.FileWriter (or compatible) to write into.
        global_step: Optional step value to associate with the summaries.
    """
    global _merge_op
    tfutil.assert_tf_initialized()

    if _merge_op is None:
        # First call: finalize autosummaries, emit the custom_scalar layout once,
        # and cache the merged summary op for subsequent calls.
        layout = finalize_autosummaries()
        if layout is not None:
            file_writer.add_summary(layout)
        with tf.device(None), tf.control_dependencies(None):
            _merge_op = tf.summary.merge_all()

    # Evaluating the merge op also triggers the read-then-reset of the accumulators.
    file_writer.add_summary(_merge_op.eval(), global_step)
| 7,535 | 39.735135 | 127 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/tflib/network.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for managing networks."""
import types
import inspect
import re
import uuid
import sys
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import Any, List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = []        # Custom import handlers for dealing with legacy data in pickle import (see import_handler).
_import_module_src = dict()  # Source code for temporary modules created during pickle import (module => src string).
def import_handler(handler_func):
    """Function decorator for declaring custom import handlers.

    The decorated function is registered globally; Network.__setstate__() passes
    each pickled state dict through every registered handler in order.
    """
    _import_handlers.append(handler_func)
    return handler_func
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
components: Container for sub-networks. Passed to the build func, and retained between calls.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
vars: All variables (local_name => var).
trainables: All trainable variables (local_name => var).
var_global_to_local: Mapping from variable global names to local names.
"""
def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
    """Build a Network from a build function; see the class docstring for arguments."""
    tfutil.assert_tf_initialized()
    assert isinstance(name, str) or name is None
    assert func_name is not None
    assert isinstance(func_name, str) or util.is_top_level_function(func_name)
    assert util.is_pickleable(static_kwargs)  # static_kwargs must survive pickling for __getstate__

    self._init_fields()
    self.name = name
    self.static_kwargs = util.EasyDict(static_kwargs)

    # Locate the user-specified network build function.
    if util.is_top_level_function(func_name):
        func_name = util.get_top_level_function_name(func_name)
    module, self._build_func_name = util.get_module_from_obj_name(func_name)
    self._build_func = util.get_obj_from_module(module, self._build_func_name)
    assert callable(self._build_func)

    # Dig up source code for the module containing the build function.
    # Prefer the cached source of a module created by a previous pickle import.
    self._build_module_src = _import_module_src.get(module, None)
    if self._build_module_src is None:
        self._build_module_src = inspect.getsource(module)

    # Init TensorFlow graph.
    self._init_graph()
    self.reset_own_vars()
def _init_fields(self) -> None:
    """Reset every instance attribute to a safe default.

    Called before __init__, __setstate__, and clone() populate the real values,
    so that a partially constructed Network never has missing attributes.
    """
    self.name = None
    self.scope = None
    self.static_kwargs = util.EasyDict()
    self.components = util.EasyDict()

    # Input/output metadata; filled in by _init_graph().
    self.num_inputs = 0
    self.num_outputs = 0
    self.input_shapes = [[]]
    self.output_shapes = [[]]
    self.input_shape = []
    self.output_shape = []
    self.input_templates = []
    self.output_templates = []
    self.input_names = []
    self.output_names = []

    # Variable bookkeeping; filled in by _init_graph().
    self.own_vars = OrderedDict()
    self.vars = OrderedDict()
    self.trainables = OrderedDict()
    self.var_global_to_local = OrderedDict()

    self._build_func = None        # User-supplied build function that constructs the network.
    self._build_func_name = None   # Name of the build function.
    self._build_module_src = None  # Full source code of the module containing the build function.
    self._run_cache = dict()       # Cached graph data for Network.run().
def _init_graph(self) -> None:
    """Build the template graph for this network and collect its I/O and variables."""
    # Collect inputs: every positional build-func parameter without a default
    # becomes a network input.
    self.input_names = []

    for param in inspect.signature(self._build_func).parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
            self.input_names.append(param.name)

    self.num_inputs = len(self.input_names)
    assert self.num_inputs >= 1

    # Choose name and scope (scope is made unique within the graph).
    if self.name is None:
        self.name = self._build_func_name
    assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
    with tf.name_scope(None):
        self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)

    # Finalize build func kwargs.
    build_kwargs = dict(self.static_kwargs)
    build_kwargs["is_template_graph"] = True
    build_kwargs["components"] = self.components

    # Build template graph.
    with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope):  # ignore surrounding scopes
        assert tf.get_variable_scope().name == self.scope
        assert tf.get_default_graph().get_name_scope() == self.scope
        with tf.control_dependencies(None):  # ignore surrounding control dependencies
            self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
            out_expr = self._build_func(*self.input_templates, **build_kwargs)

    # Collect outputs.
    assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
    self.num_outputs = len(self.output_templates)
    assert self.num_outputs >= 1
    assert all(tfutil.is_tf_expression(t) for t in self.output_templates)

    # Perform sanity checks.
    if any(t.shape.ndims is None for t in self.input_templates):
        raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
    if any(t.shape.ndims is None for t in self.output_templates):
        raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
    if any(not isinstance(comp, Network) for comp in self.components.values()):
        raise ValueError("Components of a Network must be Networks themselves.")
    if len(self.components) != len(set(comp.name for comp in self.components.values())):
        raise ValueError("Components of a Network must have unique names.")

    # List inputs and outputs.
    self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
    self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
    self.input_shape = self.input_shapes[0]
    self.output_shape = self.output_shapes[0]
    self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]

    # List variables. own_vars keys are local names (scope prefix stripped);
    # sub-network vars are merged in under "<component name>/<local name>".
    self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
    self.vars = OrderedDict(self.own_vars)
    self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
    self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
    self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
def reset_own_vars(self) -> None:
    """Re-initialize all variables of this network, excluding sub-networks."""
    initializers = [var.initializer for var in self.own_vars.values()]
    tfutil.run(initializers)
def reset_vars(self) -> None:
    """Re-initialize all variables of this network, including sub-networks."""
    initializers = [var.initializer for var in self.vars.values()]
    tfutil.run(initializers)
def reset_trainables(self) -> None:
    """Re-initialize all trainable variables of this network, including sub-networks."""
    initializers = [var.initializer for var in self.trainables.values()]
    tfutil.run(initializers)
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
    """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s).

    Args:
        in_expr: One expression per network input; individual entries may be
            None, in which case a zero tensor of the right shape is substituted.
        return_as_list: If True, always return a list of output expressions.
        dynamic_kwargs: Extra keyword arguments forwarded to the build function.
    """
    assert len(in_expr) == self.num_inputs
    assert not all(expr is None for expr in in_expr)

    # Finalize build func kwargs.
    build_kwargs = dict(self.static_kwargs)
    build_kwargs.update(dynamic_kwargs)
    build_kwargs["is_template_graph"] = False
    build_kwargs["components"] = self.components

    # Build TensorFlow graph to evaluate the network.
    with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
        assert tf.get_variable_scope().name == self.scope
        valid_inputs = [expr for expr in in_expr if expr is not None]
        final_inputs = []
        for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
            if expr is not None:
                expr = tf.identity(expr, name=name)
            else:
                # Missing input: substitute zeros batched like the first valid input.
                expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
            final_inputs.append(expr)
        out_expr = self._build_func(*final_inputs, **build_kwargs)

    # Propagate input shapes back to the user-specified expressions.
    for expr, final in zip(in_expr, final_inputs):
        if isinstance(expr, tf.Tensor):
            expr.set_shape(final.shape)

    # Express outputs in the desired format.
    assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    if return_as_list:
        out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
    return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
    """Get the local name of a given variable, without any surrounding name scopes."""
    assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
    if isinstance(var_or_global_name, str):
        global_name = var_or_global_name
    else:
        global_name = var_or_global_name.name
    return self.var_global_to_local[global_name]
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
    """Find variable by local or global name."""
    assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
    if isinstance(var_or_local_name, str):
        # Local name => look up in the variable registry.
        return self.vars[var_or_local_name]
    # Already a variable/expression => pass through unchanged.
    return var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
    """Get the value of a given variable as NumPy array.
    Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
    variable = self.find_var(var_or_local_name)
    return variable.eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
    """Set the value of a given variable based on the given NumPy array.
    Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
    variable = self.find_var(var_or_local_name)
    tfutil.set_vars({variable: new_value})
def __getstate__(self) -> dict:
    """Pickle export: capture config, build-func source, and variable values."""
    state = dict()
    state["version"] = 3  # pickle format version; __setstate__ accepts 2 and 3
    state["name"] = self.name
    state["static_kwargs"] = dict(self.static_kwargs)
    state["components"] = dict(self.components)
    # Full module source is embedded so the build function can be re-created
    # on import without the original codebase.
    state["build_module_src"] = self._build_module_src
    state["build_func_name"] = self._build_func_name
    # Evaluate all own variables eagerly into (local_name, ndarray) pairs.
    state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
    return state
def __setstate__(self, state: dict) -> None:
    """Pickle import: rebuild the network from an exported state dict.

    SECURITY NOTE: this exec()s the module source embedded in the pickle --
    only load pickles from trusted sources.
    """
    # pylint: disable=attribute-defined-outside-init
    tfutil.assert_tf_initialized()
    self._init_fields()

    # Execute custom import handlers (registered via @import_handler) to
    # migrate legacy state formats.
    for handler in _import_handlers:
        state = handler(state)

    # Set basic fields.
    assert state["version"] in [2, 3]
    self.name = state["name"]
    self.static_kwargs = util.EasyDict(state["static_kwargs"])
    self.components = util.EasyDict(state.get("components", {}))
    self._build_module_src = state["build_module_src"]
    self._build_func_name = state["build_func_name"]

    # Create temporary module from the imported source code. A fresh uuid-based
    # name avoids collisions between independently imported pickles.
    module_name = "_tflib_network_import_" + uuid.uuid4().hex
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module
    _import_module_src[module] = self._build_module_src
    exec(self._build_module_src, module.__dict__)  # pylint: disable=exec-used

    # Locate network build function in the temporary module.
    self._build_func = util.get_obj_from_module(module, self._build_func_name)
    assert callable(self._build_func)

    # Init TensorFlow graph and restore the saved variable values.
    self._init_graph()
    self.reset_own_vars()
    tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
    """Create a clone of this network with its own copy of the variables.

    Args:
        name: Name for the clone; defaults to this network's name.
        new_static_kwargs: Overrides merged into the clone's static_kwargs.
    """
    # pylint: disable=protected-access
    net = object.__new__(Network)  # bypass __init__; all fields are set up manually below
    net._init_fields()
    net.name = name if name is not None else self.name
    net.static_kwargs = util.EasyDict(self.static_kwargs)
    net.static_kwargs.update(new_static_kwargs)
    net._build_module_src = self._build_module_src
    net._build_func_name = self._build_func_name
    net._build_func = self._build_func
    net._init_graph()
    net.copy_vars_from(self)
    return net
def copy_own_vars_from(self, src_net: "Network") -> None:
    """Copy the values of all variables from the given network, excluding sub-networks."""
    common = [name for name in self.own_vars.keys() if name in src_net.own_vars]
    value_map = tfutil.run({self.vars[name]: src_net.vars[name] for name in common})
    tfutil.set_vars(value_map)
def copy_vars_from(self, src_net: "Network") -> None:
    """Copy the values of all variables from the given network, including sub-networks."""
    common = [name for name in self.vars.keys() if name in src_net.vars]
    value_map = tfutil.run({self.vars[name]: src_net.vars[name] for name in common})
    tfutil.set_vars(value_map)
def copy_trainables_from(self, src_net: "Network") -> None:
    """Copy the values of all trainable variables from the given network, including sub-networks."""
    common = [name for name in self.trainables.keys() if name in src_net.trainables]
    value_map = tfutil.run({self.vars[name]: src_net.vars[name] for name in common})
    tfutil.set_vars(value_map)
def copy_compatible_trainables_from(self, src_net: "Network") -> None:
    """Copy the compatible values of all trainable variables from the given network, including sub-networks.

    Variables missing from src_net or with mismatched shapes are skipped with a
    console message instead of raising.
    """
    names = []
    for name in self.trainables.keys():
        if name not in src_net.trainables:
            print("Not restoring (not present): {}".format(name))
        elif self.trainables[name].shape != src_net.trainables[name].shape:
            print("Not restoring (different shape): {}".format(name))
        else:
            # Present in both networks with matching shape => safe to copy.
            # (Replaces the original's redundant re-check of both conditions.)
            names.append(name)
    tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def apply_swa(self, src_net, epoch):
    """Perform stochastic weight averaging on the compatible values of all trainable variables from the given network, including sub-networks.

    Updates this network in place as a running average:
        self = src * 1/(epoch+1) + self * epoch/(epoch+1)

    Variables missing from src_net or with mismatched shapes are skipped with a
    console message.
    """
    names = []
    for name in self.trainables.keys():
        if name not in src_net.trainables:
            print("Not restoring (not present): {}".format(name))
        elif self.trainables[name].shape != src_net.trainables[name].shape:
            print("Not restoring (different shape): {}".format(name))
        else:
            # Compatible in both networks => include in the average.
            # (Replaces the original's redundant re-check of both conditions.)
            names.append(name)

    scale_new_data = 1.0 / (epoch + 1)
    scale_moving_average = 1.0 - scale_new_data
    tfutil.set_vars(tfutil.run({self.vars[name]: (src_net.vars[name] * scale_new_data + self.vars[name] * scale_moving_average) for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
    """Create new network with the given parameters, and copy all variables from this network."""
    if new_name is None:
        new_name = self.name
    # Merge this network's static kwargs with the caller's overrides.
    merged_kwargs = dict(self.static_kwargs)
    merged_kwargs.update(new_static_kwargs)
    converted = Network(name=new_name, func_name=new_func_name, **merged_kwargs)
    converted.copy_vars_from(self)
    return converted
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
    """Construct a TensorFlow op that updates the variables of this network
    to be slightly closer to those of the given network.

    Args:
        src_net: Network whose variables are tracked.
        beta: Decay used for trainable variables (new = lerp(src, cur, beta)).
        beta_nontrainable: Decay for non-trainable variables (0.0 = copy outright).

    Returns:
        A single tf.Operation grouping all per-variable updates.
    """
    with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
        ops = []
        for name, var in self.vars.items():
            if name in src_net.vars:  # variables absent from src_net are left untouched
                cur_beta = beta if name in self.trainables else beta_nontrainable
                new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
                ops.append(var.assign(new_value))
        return tf.group(*ops)
def run(self,
        *in_arrays: Tuple[Union[np.ndarray, None], ...],
        input_transform: dict = None,
        output_transform: dict = None,
        return_as_list: bool = False,
        print_progress: bool = False,
        minibatch_size: int = None,
        num_gpus: int = 1,
        assume_frozen: bool = False,
        custom_inputs=None,
        **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
    """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).

    Args:
        input_transform:    A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
                            The dict must contain a 'func' field that points to a top-level function. The function is called with the input
                            TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
        output_transform:   A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
                            The dict must contain a 'func' field that points to a top-level function. The function is called with the output
                            TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
        return_as_list:     True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress:     Print progress to the console? Useful for very large input arrays.
        minibatch_size:     Maximum minibatch size to use, None = disable batching.
        num_gpus:           Number of GPUs to use.
        assume_frozen:      Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
        custom_inputs:      Allow to use another Tensor as input instead of default Placeholders.
        dynamic_kwargs:     Additional keyword arguments to be passed into the network build function.
    """
    assert len(in_arrays) == self.num_inputs
    assert not all(arr is None for arr in in_arrays)
    assert input_transform is None or util.is_top_level_function(input_transform["func"])
    assert output_transform is None or util.is_top_level_function(output_transform["func"])
    output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
    # NOTE(review): assumes the FIRST input array is not None -- confirm callers.
    num_items = in_arrays[0].shape[0]
    if minibatch_size is None:
        minibatch_size = num_items

    # Construct unique hash key from all arguments that affect the TensorFlow graph.
    key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
    def unwind_key(obj):
        # Recursively turn the key dict into a deterministic, repr-able structure
        # (dicts sorted, callables replaced by their qualified names).
        if isinstance(obj, dict):
            return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
        if callable(obj):
            return util.get_top_level_function_name(obj)
        return obj
    key = repr(unwind_key(key))

    # Build graph (once per unique key; cached in self._run_cache afterwards).
    if key not in self._run_cache:
        with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
            if custom_inputs is not None:
                with tf.device("/gpu:0"):
                    in_expr = [input_builder(name) for input_builder, name in zip(custom_inputs, self.input_names)]
                    in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
            else:
                with tf.device("/cpu:0"):
                    in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))

            out_split = []
            for gpu in range(num_gpus):
                with tf.device("/gpu:%d" % gpu):
                    net_gpu = self.clone() if assume_frozen else self
                    in_gpu = in_split[gpu]

                    if input_transform is not None:
                        in_kwargs = dict(input_transform)
                        in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
                    in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)

                    assert len(in_gpu) == self.num_inputs
                    out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)

                    if output_transform is not None:
                        out_kwargs = dict(output_transform)
                        out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
                    out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)

                    assert len(out_gpu) == self.num_outputs
                    out_split.append(out_gpu)

            with tf.device("/cpu:0"):
                # Re-assemble the per-GPU output shards along the batch axis.
                out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
                self._run_cache[key] = in_expr, out_expr

    # Run minibatches.
    in_expr, out_expr = self._run_cache[key]
    out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]

    for mb_begin in range(0, num_items, minibatch_size):
        if print_progress:
            print("\r%d / %d" % (mb_begin, num_items), end="")

        mb_end = min(mb_begin + minibatch_size, num_items)
        mb_num = mb_end - mb_begin
        # None inputs are replaced with zero arrays of the declared input shape.
        mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
        mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))

        for dst, src in zip(out_arrays, mb_out):
            dst[mb_begin: mb_end] = src

    # Done.
    if print_progress:
        print("\r%d / %d" % (num_items, num_items))

    if not return_as_list:
        out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
    return out_arrays
def list_ops(self) -> List[TfExpression]:
include_prefix = self.scope + "/"
exclude_prefix = include_prefix + "_"
ops = tf.get_default_graph().get_operations()
ops = [op for op in ops if op.name.startswith(include_prefix)]
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
return ops
    def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
        """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
        individual layers of the network. Mainly intended to be used for reporting.

        The graph is walked recursively by name-scope: a scope is reported as a single
        layer once it contains direct (non-Identity) ops, otherwise recursion descends
        into its child scopes. Layer names are relative to self.scope.
        """
        layers = []
        def recurse(scope, parent_ops, parent_vars, level):
            # Ignore specific patterns.
            if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
                return
            # Filter ops and vars by scope.
            # global_prefix is an absolute graph name; local_prefix is relative to self.scope.
            global_prefix = scope + "/"
            local_prefix = global_prefix[len(self.scope) + 1:]
            cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
            cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
            if not cur_ops and not cur_vars:
                return
            # Filter out all ops related to variables.
            for var in [op for op in cur_ops if op.type.startswith("Variable")]:
                var_prefix = var.name + "/"
                cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
            # Scope does not contain ops as immediate children => recurse deeper.
            contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops)
            if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
                # Recurse into each distinct child scope exactly once, preserving order.
                visited = set()
                for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
                    token = rel_name.split("/")[0]
                    if token not in visited:
                        recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
                        visited.add(token)
                return
            # Report layer.
            # Output is the last op's first output; falls back to the last variable if the scope has no ops.
            layer_name = scope[len(self.scope) + 1:]
            layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
            layer_trainables = [var for _name, var in cur_vars if var.trainable]
            layers.append((layer_name, layer_output, layer_trainables))
        recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
        return layers
    def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
        """Print a summary table of the network structure.

        Args:
            title: Heading for the first column; defaults to the network name.
            hide_layers_with_no_params: Skip rows whose layer has zero trainable parameters.
        """
        rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
        rows += [["---"] * 4]
        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables)
            # Prefer the canonical weight tensor(s) for the "WeightShape" column;
            # shortest name wins when both weight:0 and weight_1:0 exist.
            weights = [var for var in layer_trainables if var.name.endswith("/weight:0") or var.name.endswith("/weight_1:0")]
            weights.sort(key=lambda x: len(x.name))
            if len(weights) == 0 and len(layer_trainables) == 1:
                weights = layer_trainables
            total_params += num_params
            if not hide_layers_with_no_params or num_params != 0:
                num_params_str = str(num_params) if num_params > 0 else "-"
                output_shape_str = str(layer_output.shape)
                weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
                rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
        rows += [["---"] * 4]
        rows += [["Total", str(total_params), "", ""]]
        # Pad every cell to its column width so the table lines up.
        widths = [max(len(cell) for cell in column) for column in zip(*rows)]
        print()
        for row in rows:
            print("  ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
        print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
# Backwards-compatible emulation of legacy output transformation in Network.run().
_print_legacy_warning = True
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
global _print_legacy_warning
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
return output_transform, dynamic_kwargs
if _print_legacy_warning:
_print_legacy_warning = False
print()
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
print()
assert output_transform is None
new_kwargs = dict(dynamic_kwargs)
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
new_transform["func"] = _legacy_output_transform_func
return new_transform, new_kwargs
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
if out_mul != 1.0:
expr = [x * out_mul for x in expr]
if out_add != 0.0:
expr = [x + out_add for x in expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
expr = [tf.round(x) for x in expr]
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
return expr
| 32,314 | 50.375199 | 165 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/tflib/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import autosummary
from . import network
from . import optimizer
from . import tfutil
from .tfutil import *
from .network import Network
from .optimizer import Optimizer
| 522 | 29.764706 | 76 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/tflib/optimizer.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper wrapper for a Tensorflow optimizer."""
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import List, Union
from . import autosummary
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
try:
# TensorFlow 1.13
from tensorflow.python.ops import nccl_ops
except:
# Older TensorFlow versions
import tensorflow.contrib.nccl as nccl_ops
class Optimizer:
    """A Wrapper for tf.train.Optimizer.
    Automatically takes care of:
    - Gradient averaging for multi-GPU training.
    - Dynamic loss scaling and typecasts for FP16 training.
    - Ignoring corrupted gradients that contain NaNs/Infs.
    - Reporting statistics.
    - Well-chosen default settings.

    Typical usage: call register_gradients() once per GPU, then apply_updates()
    once to build the combined training op.
    """
    def __init__(self,
                 name: str = "Train",
                 tf_optimizer: str = "tf.train.AdamOptimizer",
                 learning_rate: TfExpressionEx = 0.001,
                 use_loss_scaling: bool = False,
                 loss_scaling_init: float = 64.0,
                 loss_scaling_inc: float = 0.0005,
                 loss_scaling_dec: float = 1.0,
                 **kwargs):
        # Init fields.
        self.name = name
        self.learning_rate = tf.convert_to_tensor(learning_rate)
        self.id = self.name.replace("/", ".")
        # Graph-unique scope so several optimizers can coexist in one graph.
        self.scope = tf.get_default_graph().unique_name(self.id)
        # tf_optimizer is given as a dotted name string and resolved lazily here.
        self.optimizer_class = util.get_obj_by_name(tf_optimizer)
        self.optimizer_kwargs = dict(kwargs)
        self.use_loss_scaling = use_loss_scaling
        self.loss_scaling_init = loss_scaling_init
        self.loss_scaling_inc = loss_scaling_inc
        self.loss_scaling_dec = loss_scaling_dec
        self._grad_shapes = None  # [shape, ...]
        self._dev_opt = OrderedDict()  # device => optimizer
        self._dev_grads = OrderedDict()  # device => [[(grad, var), ...], ...]
        self._dev_ls_var = OrderedDict()  # device => variable (log2 of loss scaling factor)
        self._updates_applied = False
    def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
        """Register the gradients of the given loss function with respect to the given variables.
        Intended to be called once per GPU."""
        assert not self._updates_applied
        # Validate arguments.
        if isinstance(trainable_vars, dict):
            trainable_vars = list(trainable_vars.values())  # allow passing in Network.trainables as vars
        assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
        assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
        # All GPUs must register the same variable shapes in the same order.
        if self._grad_shapes is None:
            self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]
        assert len(trainable_vars) == len(self._grad_shapes)
        assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))
        dev = loss.device
        assert all(var.device == dev for var in trainable_vars)
        # Register device and compute gradients.
        with tf.name_scope(self.id + "_grad"), tf.device(dev):
            if dev not in self._dev_opt:
                # One underlying tf.train optimizer instance per device.
                opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
                assert callable(self.optimizer_class)
                self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
                self._dev_grads[dev] = []
            loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
            grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE)  # disable gating to reduce memory usage
            grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads]  # replace disconnected gradients with zeros
            self._dev_grads[dev].append(grads)
    def apply_updates(self) -> tf.Operation:
        """Construct training op to update the registered variables based on their gradients."""
        tfutil.assert_tf_initialized()
        assert not self._updates_applied
        self._updates_applied = True
        devices = list(self._dev_grads.keys())
        total_grads = sum(len(grads) for grads in self._dev_grads.values())
        assert len(devices) >= 1 and total_grads >= 1
        ops = []
        with tfutil.absolute_name_scope(self.scope):
            # Cast gradients to FP32 and calculate partial sum within each device.
            dev_grads = OrderedDict()  # device => [(grad, var), ...]
            for dev_idx, dev in enumerate(devices):
                with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
                    sums = []
                    for gv in zip(*self._dev_grads[dev]):
                        # All entries of gv refer to the same variable; sum their gradients.
                        assert all(v is gv[0][1] for g, v in gv)
                        g = [tf.cast(g, tf.float32) for g, v in gv]
                        g = g[0] if len(g) == 1 else tf.add_n(g)
                        sums.append((g, gv[0][1]))
                    dev_grads[dev] = sums
            # Sum gradients across devices.
            if len(devices) > 1:
                with tf.name_scope("SumAcrossGPUs"), tf.device(None):
                    for var_idx, grad_shape in enumerate(self._grad_shapes):
                        g = [dev_grads[dev][var_idx][0] for dev in devices]
                        if np.prod(grad_shape): # nccl does not support zero-sized tensors
                            g = nccl_ops.all_sum(g)
                        for dev, gg in zip(devices, g):
                            dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
            # Apply updates separately on each device.
            for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
                with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
                    # Scale gradients as needed.
                    # 1/total_grads averages the per-GPU sums; undo_loss_scaling removes
                    # the FP16 loss-scaling factor applied in register_gradients.
                    if self.use_loss_scaling or total_grads > 1:
                        with tf.name_scope("Scale"):
                            coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
                            coef = self.undo_loss_scaling(coef)
                            grads = [(g * coef, v) for g, v in grads]
                    # Check for overflows.
                    with tf.name_scope("CheckOverflow"):
                        grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
                    # Update weights and adjust loss scaling.
                    # Updates are skipped entirely when any gradient contains NaN/Inf.
                    with tf.name_scope("UpdateWeights"):
                        # pylint: disable=cell-var-from-loop
                        opt = self._dev_opt[dev]
                        ls_var = self.get_loss_scaling_var(dev)
                        if not self.use_loss_scaling:
                            ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
                        else:
                            ops.append(tf.cond(grad_ok,
                                               lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
                                               lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
                    # Report statistics on the last device.
                    if dev == devices[-1]:
                        with tf.name_scope("Statistics"):
                            ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
                            ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))
                            if self.use_loss_scaling:
                                ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))
            # Initialize variables and group everything into a single op.
            self.reset_optimizer_state()
            tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))
            return tf.group(*ops, name="TrainingOp")
    def reset_optimizer_state(self) -> None:
        """Reset internal state of the underlying optimizer."""
        tfutil.assert_tf_initialized()
        tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
    def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
        """Get or create variable representing log2 of the current dynamic loss scaling factor."""
        if not self.use_loss_scaling:
            return None
        if device not in self._dev_ls_var:
            with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
                self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")
        return self._dev_ls_var[device]
    def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Apply dynamic loss scaling for the given expression."""
        assert tfutil.is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
    def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
        """Undo the effect of dynamic loss scaling for the given expression."""
        assert tfutil.is_tf_expression(value)
        if not self.use_loss_scaling:
            return value
        return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
| 9,981 | 45.427907 | 164 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/submission/run_context.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helpers for managing the run/training loop."""
import datetime
import json
import os
import pprint
import time
import types
from typing import Any
from . import submit
class RunContext(object):
    """Helper class for managing the run/training loop.
    The context will hide the implementation details of a basic run/training loop.
    It will set things up properly, tell if run should be stopped, and then cleans up.
    User should call update periodically and use should_stop to determine if run should be stopped.
    Args:
        submit_config: The SubmitConfig that is used for the current run.
        config_module: The whole config module that is used for the current run.
        max_epoch: Optional cached value for the max_epoch variable used in update.
    """
    def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None):
        self.submit_config = submit_config
        self.should_stop_flag = False
        self.has_closed = False
        self.start_time = time.time()
        self.last_update_time = time.time()
        self.last_update_interval = 0.0
        self.max_epoch = max_epoch
        # Pretty-print all the relevant content of the config module to a text file.
        if config_module is not None:
            with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f:
                filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))}
                pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False)
        # Write out details about the run to a text file.
        self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")}
        with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f:
            pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
    def __enter__(self) -> "RunContext":
        return self
    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()
    def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None:
        """Do general housekeeping and keep the state of the context up-to-date.
        Should be called often enough but not in a tight loop.

        The loss/cur_epoch/max_epoch parameters are accepted for interface
        compatibility; this implementation does not use them.
        """
        assert not self.has_closed
        # Fix: sample the clock once so last_update_interval and last_update_time
        # are consistent with each other (original called time.time() twice).
        now = time.time()
        self.last_update_interval = now - self.last_update_time
        self.last_update_time = now
        # The run can be aborted externally by dropping an "abort.txt" file into the run dir.
        if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")):
            self.should_stop_flag = True
        # Fix: removed the dead "max_epoch_val = ..." assignment, whose result was never used.
    def should_stop(self) -> bool:
        """Tell whether a stopping condition has been triggered one way or another."""
        return self.should_stop_flag
    def get_time_since_start(self) -> float:
        """How much time has passed since the creation of the context."""
        return time.time() - self.start_time
    def get_time_since_last_update(self) -> float:
        """How much time has passed since the last call to update."""
        return time.time() - self.last_update_time
    def get_last_update_interval(self) -> float:
        """How much time passed between the previous two calls to update."""
        return self.last_update_interval
    def close(self) -> None:
        """Close the context and clean up.
        Should only be called once; subsequent calls are no-ops."""
        if not self.has_closed:
            # Update the run.txt with stopping time.
            self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ")
            with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f:
                pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
            self.has_closed = True
| 4,347 | 42.48 | 213 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/submission/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import run_context
from . import submit
| 390 | 38.1 | 76 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/submission/submit.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Submit a function to be run either locally or in a computing cluster."""
import copy
import io
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import time
import traceback
import zipfile
from enum import Enum
from .. import util
from ..util import EasyDict
class SubmitTarget(Enum):
    """The target where the function should be run.
    LOCAL: Run it locally.
    """
    LOCAL = 1
class PathType(Enum):
    """Determines in which format should a path be formatted.
    WINDOWS: Format with Windows style.
    LINUX: Format with Linux/Posix style.
    AUTO: Use current OS type to select either WINDOWS or LINUX.
    """
    WINDOWS = 1
    LINUX = 2
    AUTO = 3
_user_name_override = None
class SubmitConfig(util.EasyDict):
    """Strongly typed config dict needed to submit runs.
    Attributes:
        run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template.
        run_desc: Description of the run. Will be used in the run dir and task name.
        run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir.
        run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir.
        submit_target: Submit target enum value. Used to select where the run is actually launched.
        num_gpus: Number of GPUs used/requested for the run.
        print_info: Whether to print debug information when submitting.
        ask_confirmation: Whether to ask a confirmation before submitting.
        run_id: Automatically populated value during submit.
        run_name: Automatically populated value during submit.
        run_dir: Automatically populated value during submit.
        run_func_name: Automatically populated value during submit.
        run_func_kwargs: Automatically populated value during submit.
        user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value.
        task_name: Automatically populated value during submit.
        host_name: Automatically populated value during submit.
    """
    def __init__(self):
        super().__init__()
        # run (set these)
        self.run_dir_root = ""  # should always be passed through get_path_from_template
        self.run_desc = ""
        self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"]
        self.run_dir_extra_files = None
        # submit (set these)
        self.submit_target = SubmitTarget.LOCAL
        self.num_gpus = 1
        self.print_info = False
        self.ask_confirmation = False
        # (automatically populated during submit_run; leave as None)
        self.run_id = None
        self.run_name = None
        self.run_dir = None
        self.run_func_name = None
        self.run_func_kwargs = None
        self.user_name = None
        self.task_name = None
        self.host_name = "localhost"
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
    """Replace tags in the given path template and return either Windows or Linux formatted path."""
    # Resolve AUTO to a concrete path type based on the running OS.
    resolved_type = path_type
    if resolved_type == PathType.AUTO:
        system = platform.system()
        if system == "Windows":
            resolved_type = PathType.WINDOWS
        elif system == "Linux":
            resolved_type = PathType.LINUX
        else:
            raise RuntimeError("Unknown platform")
    # Substitute the supported template tags.
    path_template = path_template.replace("<USERNAME>", get_user_name())
    # Return correctly formatted path.
    if resolved_type == PathType.WINDOWS:
        return str(pathlib.PureWindowsPath(path_template))
    if resolved_type == PathType.LINUX:
        return str(pathlib.PurePosixPath(path_template))
    raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
    """Convert a normal path back to its template representation.

    Currently this only normalizes backslashes to forward slashes.
    """
    return path.replace("\\", "/")
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
    """Convert a normal path to template and the convert it back to a normal path with given path type."""
    # Round-trip through the template form to normalize separators for the target OS.
    return get_path_from_template(get_template_from_path(path), path_type)
def set_user_name_override(name: str) -> None:
    """Set the global username override value.

    Once set, get_user_name() returns this value instead of querying the OS.
    """
    global _user_name_override
    _user_name_override = name
def get_user_name():
    """Get the current user name.

    Returns the override set via set_user_name_override() when present; otherwise
    queries the OS. Falls back to "unknown" on Linux when the lookup fails.
    Raises RuntimeError on unsupported platforms.
    """
    if _user_name_override is not None:
        return _user_name_override
    elif platform.system() == "Windows":
        return os.getlogin()
    elif platform.system() == "Linux":
        try:
            import pwd  # pylint: disable=import-error
            return pwd.getpwuid(os.geteuid()).pw_name  # pylint: disable=no-member
        except Exception:
            # Fix: was a bare "except:", which also swallowed SystemExit/KeyboardInterrupt.
            return "unknown"
    else:
        raise RuntimeError("Unknown platform")
def _create_run_dir_local(submit_config: SubmitConfig) -> str:
    """Create a new run dir with increasing ID number at the start.

    Also populates submit_config.run_id and submit_config.run_name.
    """
    root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
    if not os.path.exists(root):
        print("Creating the run dir root: {}".format(root))
        os.makedirs(root)
    # Pick the next free numeric ID and derive the run name from it.
    submit_config.run_id = _get_next_run_id_local(root)
    submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc)
    run_dir = os.path.join(root, submit_config.run_name)
    if os.path.exists(run_dir):
        raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
    print("Creating the run dir: {}".format(run_dir))
    os.makedirs(run_dir)
    return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
"""Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names."""
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None:
    """Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable."""
    print("Copying files to the run dir")
    files = []
    # Locate the top-level package directory of the module that contains the run function.
    run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
    assert '.' in submit_config.run_func_name
    for _idx in range(submit_config.run_func_name.count('.') - 1):
        run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
    files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False)
    # Always ship a copy of dnnlib itself (kept under its own subdirectory).
    dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
    files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True)
    if submit_config.run_dir_extra_files is not None:
        files += submit_config.run_dir_extra_files
    # Everything lands under <run_dir>/src, plus the cluster entry-point script at the top level.
    files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
    files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))]
    util.copy_files_and_create_dirs(files)
    # Persist the submit config both as a pickle (consumed by run.py) and as readable text.
    # Fix: the original called pickle.dump(..., open(...)) and leaked the file handle.
    with open(os.path.join(run_dir, "submit_config.pkl"), "wb") as f:
        pickle.dump(submit_config, f)
    with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
        pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False)
def run_wrapper(submit_config: SubmitConfig) -> None:
    """Wrap the actual run function call for handling logging, exceptions, typing, etc.

    Resolves submit_config.run_func_name to a callable, invokes it with
    submit_config plus run_func_kwargs, and always drops a "_finished.txt"
    marker into the run dir when the run terminates (successfully or not).
    """
    is_local = submit_config.submit_target == SubmitTarget.LOCAL
    # NOTE(review): checker is never reassigned anywhere in this file, so the
    # checker.stop() call at the bottom is effectively dead code.
    checker = None
    # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
    if is_local:
        logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True)
    else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
        logger = util.Logger(file_name=None, should_flush=True)
    # Expose the active config globally so user code can reach it as dnnlib.submit_config.
    import dnnlib
    dnnlib.submit_config = submit_config
    try:
        print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
        start_time = time.time()
        util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs)
        print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
    except:
        if is_local:
            # Local runs surface the exception directly to the caller.
            raise
        else:
            # Cluster runs log the traceback and copy the log next to the run dir
            # root so failures are easy to spot without opening the run dir.
            traceback.print_exc()
            log_src = os.path.join(submit_config.run_dir, "log.txt")
            log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
            shutil.copyfile(log_src, log_dst)
    finally:
        # Marker file signals to external observers that the run has terminated.
        open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
    dnnlib.submit_config = None
    logger.close()
    if checker is not None:
        checker.stop()
def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None:
    """Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.

    Args:
        submit_config: Base configuration; a shallow copy is taken so the caller's
            object is not mutated by the auto-populated fields.
        run_func_name: Dotted name of the function to execute.
        **run_func_kwargs: Keyword arguments forwarded to the run function.
    """
    submit_config = copy.copy(submit_config)
    if submit_config.user_name is None:
        submit_config.user_name = get_user_name()
    submit_config.run_func_name = run_func_name
    submit_config.run_func_kwargs = run_func_kwargs
    # Only local execution is supported in this version.
    assert submit_config.submit_target == SubmitTarget.LOCAL
    if submit_config.submit_target in {SubmitTarget.LOCAL}:
        run_dir = _create_run_dir_local(submit_config)
        submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
        submit_config.run_dir = run_dir
        _populate_run_dir(run_dir, submit_config)
    if submit_config.print_info:
        print("\nSubmit config:\n")
        pprint.pprint(submit_config, indent=4, width=200, compact=False)
        print()
    if submit_config.ask_confirmation:
        if not util.ask_yes_no("Continue submitting the job?"):
            return
    run_wrapper(submit_config)
| 11,185 | 37.439863 | 175 | py |
stylegan-encoder | stylegan-encoder-master/dnnlib/submission/_internal/run.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for launching run functions in computing clusters.
During the submit process, this file is copied to the appropriate run dir.
When the job is launched in the cluster, this module is the first thing that
is run inside the docker container.
"""
import os
import pickle
import sys
# PYTHONPATH should have been set so that the run_dir/src is in it
import dnnlib
def main():
    """Parse command-line arguments, load the pickled SubmitConfig and launch the run.

    Expects: run.py <run_dir> <task_name> <host_name>
    Raises RuntimeError when arguments are missing or the pickle is absent.
    """
    if len(sys.argv) < 4:  # was "not len(sys.argv) >= 4" — same check, clearer form
        raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
    run_dir = str(sys.argv[1])
    task_name = str(sys.argv[2])
    host_name = str(sys.argv[3])
    submit_config_path = os.path.join(run_dir, "submit_config.pkl")
    # SubmitConfig should have been pickled to the run dir
    if not os.path.exists(submit_config_path):
        raise RuntimeError("SubmitConfig pickle file does not exist!")
    # Fix: close the pickle file instead of leaking the handle.
    with open(submit_config_path, "rb") as f:
        submit_config: dnnlib.SubmitConfig = pickle.load(f)
    dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
    submit_config.task_name = task_name
    submit_config.host_name = host_name
    dnnlib.submission.submit.run_wrapper(submit_config)
if __name__ == "__main__":
main()
| 1,543 | 32.565217 | 98 | py |
stylegan-encoder | stylegan-encoder-master/encoder/generator_model.py | import math
import tensorflow as tf
import numpy as np
import dnnlib.tflib as tflib
from functools import partial
def create_stub(name, batch_size):
    """Return a zero-width float32 constant, used to stub out an unused network input."""
    stub_shape = (batch_size, 0)
    return tf.constant(0, dtype='float32', shape=stub_shape)
def create_variable_for_generator(name, batch_size, tiled_dlatent, model_scale=18, tile_size = 1):
    """Create the trainable 'learnable_dlatents' variable fed into the synthesis network.

    When tiled_dlatent is set, only tile_size dlatent rows are learned and then
    repeated across all model_scale layers; otherwise a full per-layer dlatent
    tensor is learned directly.
    """
    if not tiled_dlatent:
        return tf.get_variable('learnable_dlatents',
                               shape=(batch_size, model_scale, 512),
                               dtype='float32',
                               initializer=tf.initializers.random_normal())
    small_dlatent = tf.get_variable('learnable_dlatents',
                                    shape=(batch_size, tile_size, 512),
                                    dtype='float32',
                                    initializer=tf.initializers.random_normal())
    # Repeat the small dlatent along the layer axis to cover every synthesis layer.
    return tf.tile(small_dlatent, [1, model_scale // tile_size, 1])
class Generator:
    def __init__(self, model, batch_size, custom_input=None, clipping_threshold=2, tiled_dlatent=False, model_res=1024, randomize_noise=False):
        """Wire a learnable dlatent variable through the StyleGAN synthesis network.

        Builds the synthesis graph once with a custom input (a trainable
        'learnable_dlatents' variable, or *custom_input* if given), locates the
        resulting output tensor, and prepares image-conversion and stochastic
        clipping ops for optimization.

        Args:
            model: Loaded StyleGAN network (dnnlib Network with .components.synthesis).
            batch_size: Number of images optimized simultaneously.
            custom_input: Optional externally supplied dlatent input builder.
            clipping_threshold: Dlatent components outside [-t, t] get resampled.
            tiled_dlatent: Learn a single dlatent row tiled across all layers.
            model_res: Output resolution of the model; determines layer count.
            randomize_noise: Whether synthesis noise inputs are randomized per run.
        """
        self.batch_size = batch_size
        self.tiled_dlatent=tiled_dlatent
        self.model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
        if tiled_dlatent:
            # Only one dlatent row is learned; create_variable_for_generator tiles it.
            self.initial_dlatents = np.zeros((self.batch_size, 1, 512))
            model.components.synthesis.run(np.zeros((self.batch_size, self.model_scale, 512)),
                randomize_noise=randomize_noise, minibatch_size=self.batch_size,
                custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size, tiled_dlatent=True, model_scale=self.model_scale),
                              partial(create_stub, batch_size=batch_size)],
                structure='fixed')
        else:
            self.initial_dlatents = np.zeros((self.batch_size, self.model_scale, 512))
            if custom_input is not None:
                # NOTE(review): partial(custom_input.eval(), batch_size=batch_size) calls
                # eval() at graph-build time and wraps its result — verify that
                # custom_input.eval() really returns a callable input builder.
                model.components.synthesis.run(self.initial_dlatents,
                    randomize_noise=randomize_noise, minibatch_size=self.batch_size,
                    custom_inputs=[partial(custom_input.eval(), batch_size=batch_size), partial(create_stub, batch_size=batch_size)],
                    structure='fixed')
            else:
                model.components.synthesis.run(self.initial_dlatents,
                    randomize_noise=randomize_noise, minibatch_size=self.batch_size,
                    custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size, tiled_dlatent=False, model_scale=self.model_scale),
                                  partial(create_stub, batch_size=batch_size)],
                    structure='fixed')
        self.dlatent_avg_def = model.get_var('dlatent_avg')
        self.reset_dlatent_avg()
        self.sess = tf.get_default_session()
        self.graph = tf.get_default_graph()
        # The trainable dlatent variable created by the custom input builder above.
        self.dlatent_variable = next(v for v in tf.global_variables() if 'learnable_dlatents' in v.name)
        # Placeholder + assign op so dlatents can be set repeatedly without adding graph nodes.
        self._assign_dlatent_ph = tf.placeholder(tf.float32, name="assign_dlatent_ph")
        self._assign_dlantent = tf.assign(self.dlatent_variable, self._assign_dlatent_ph)
        self.set_dlatents(self.initial_dlatents)
        def get_tensor(name):
            # Graph lookup that tolerates missing tensors (returns None instead of raising).
            try:
                return self.graph.get_tensor_by_name(name)
            except KeyError:
                return None
        # The output tensor name depends on which networks were loaded and on the
        # TF version, so try the known candidates in order.
        self.generator_output = get_tensor('G_synthesis_1/_Run/concat:0')
        if self.generator_output is None:
            self.generator_output = get_tensor('G_synthesis_1/_Run/concat/concat:0')
        if self.generator_output is None:
            self.generator_output = get_tensor('G_synthesis_1/_Run/concat_1/concat:0')
        # If we loaded only Gs and didn't load G or D, then scope "G_synthesis_1" won't exist in the graph.
        if self.generator_output is None:
            self.generator_output = get_tensor('G_synthesis/_Run/concat:0')
        if self.generator_output is None:
            self.generator_output = get_tensor('G_synthesis/_Run/concat/concat:0')
        if self.generator_output is None:
            self.generator_output = get_tensor('G_synthesis/_Run/concat_1/concat:0')
        if self.generator_output is None:
            # Dump all ops to aid debugging before giving up.
            for op in self.graph.get_operations():
                print(op)
            raise Exception("Couldn't find G_synthesis_1/_Run/concat tensor output")
        self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False)
        self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8)
        # Implement stochastic clipping similar to what is described in https://arxiv.org/abs/1702.04782
        # (Slightly different in that the latent space is normal gaussian here and was uniform in [-1, 1] in that paper,
        # so we clip any vector components outside of [-2, 2]. It seems fine, but I haven't done an ablation check.)
        clipping_mask = tf.math.logical_or(self.dlatent_variable > clipping_threshold, self.dlatent_variable < -clipping_threshold)
        clipped_values = tf.where(clipping_mask, tf.random_normal(shape=self.dlatent_variable.shape), self.dlatent_variable)
        self.stochastic_clip_op = tf.assign(self.dlatent_variable, clipped_values)
def reset_dlatents(self):
self.set_dlatents(self.initial_dlatents)
def set_dlatents(self, dlatents):
if self.tiled_dlatent:
if (dlatents.shape != (self.batch_size, 1, 512)) and (dlatents.shape[1] != 512):
dlatents = np.mean(dlatents, axis=1, keepdims=True)
if (dlatents.shape != (self.batch_size, 1, 512)):
dlatents = np.vstack([dlatents, np.zeros((self.batch_size-dlatents.shape[0], 1, 512))])
assert (dlatents.shape == (self.batch_size, 1, 512))
else:
if (dlatents.shape[1] > self.model_scale):
dlatents = dlatents[:,:self.model_scale,:]
if (isinstance(dlatents.shape[0], int)):
if (dlatents.shape != (self.batch_size, self.model_scale, 512)):
dlatents = np.vstack([dlatents, np.zeros((self.batch_size-dlatents.shape[0], self.model_scale, 512))])
assert (dlatents.shape == (self.batch_size, self.model_scale, 512))
self.sess.run([self._assign_dlantent], {self._assign_dlatent_ph: dlatents})
return
else:
self._assign_dlantent = tf.assign(self.dlatent_variable, dlatents)
return
self.sess.run([self._assign_dlantent], {self._assign_dlatent_ph: dlatents})
def stochastic_clip_dlatents(self):
self.sess.run(self.stochastic_clip_op)
def get_dlatents(self):
return self.sess.run(self.dlatent_variable)
def get_dlatent_avg(self):
return self.dlatent_avg
def set_dlatent_avg(self, dlatent_avg):
self.dlatent_avg = dlatent_avg
def reset_dlatent_avg(self):
self.dlatent_avg = self.dlatent_avg_def
def generate_images(self, dlatents=None):
if dlatents is not None:
self.set_dlatents(dlatents)
return self.sess.run(self.generated_image_uint8)
| 7,108 | 50.514493 | 148 | py |
stylegan-encoder | stylegan-encoder-master/encoder/perceptual_model.py | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
#import tensorflow_probability as tfp
#tf.enable_eager_execution()
import os
import bz2
import PIL.Image
from PIL import ImageFilter
import numpy as np
from keras.models import Model
from keras.utils import get_file
from keras.applications.vgg16 import VGG16, preprocess_input
import keras.backend as K
import traceback
import dnnlib.tflib as tflib
def load_images(images_list, image_size=256, sharpen=False):
    """Load images from disk and stack them into one (N, H, W, 3) uint8 array.

    Each image is converted to RGB; if image_size is given, it is resized to
    a square with LANCZOS resampling, optionally sharpened afterwards.
    """
    batch = []
    for path in images_list:
        image = PIL.Image.open(path).convert('RGB')
        if image_size is not None:
            image = image.resize((image_size, image_size), PIL.Image.LANCZOS)
            # Detail filter is only applied when the image was resized.
            if sharpen:
                image = image.filter(ImageFilter.DETAIL)
        batch.append(np.expand_dims(np.array(image), 0))
    return np.vstack(batch)
def tf_custom_adaptive_loss(a,b):
    """Robust adaptive loss on the flattened residual (b - a)."""
    from adaptive import lossfun
    # Flatten everything after the batch dimension.
    flat_dim = np.prod(a.get_shape().as_list()[1:])
    a_flat = tf.reshape(a, [-1, flat_dim])
    b_flat = tf.reshape(b, [-1, flat_dim])
    loss, _, _ = lossfun(b_flat - a_flat, var_suffix='1')
    return tf.math.reduce_mean(loss)
def tf_custom_adaptive_rgb_loss(a,b):
    """Adaptive image loss on the residual (b - a) in RGB pixel space."""
    from adaptive import image_lossfun
    residual = b - a
    loss, _, _ = image_lossfun(residual, color_space='RGB', representation='PIXEL')
    return tf.math.reduce_mean(loss)
def tf_custom_l1_loss(img1,img2):
    """Mean absolute difference between two tensors (scalar)."""
    absolute_diff = tf.math.abs(img2 - img1)
    return tf.math.reduce_mean(absolute_diff, axis=None)
def tf_custom_logcosh_loss(img1,img2):
    """Mean log-cosh error between two tensors (scalar)."""
    per_element = tf.keras.losses.logcosh(img1, img2)
    return tf.math.reduce_mean(per_element)
def create_stub(batch_size):
    """Return a zero-width float32 constant used as a dummy label input."""
    stub_shape = (batch_size, 0)
    return tf.constant(0, dtype='float32', shape=stub_shape)
def unpack_bz2(src_path):
    """Decompress a .bz2 file next to itself and return the new path.

    The destination is src_path with the final four characters (".bz2")
    stripped; the compressed file is left in place.
    """
    # Use context managers so both handles are closed even on error
    # (the original code leaked the BZ2File handle).
    with bz2.BZ2File(src_path) as src:
        data = src.read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path
class PerceptualModel:
    """Builds the multi-term loss used to fit StyleGAN dlatents to reference
    images (VGG feature loss, pixel losses, MS-SSIM, LPIPS, L1 penalty and an
    optional discriminator term), and drives the optimization loop.
    """
    def __init__(self, args, batch_size=1, perc_model=None, sess=None):
        self.sess = tf.get_default_session() if sess is None else sess
        K.set_session(self.sess)
        # Loss weights at or below epsilon are treated as disabled (None).
        self.epsilon = 0.00000001
        self.lr = args.lr
        self.decay_rate = args.decay_rate
        self.decay_steps = args.decay_steps
        self.img_size = args.image_size
        self.layer = args.use_vgg_layer
        self.vgg_loss = args.use_vgg_loss
        self.face_mask = args.face_mask
        self.use_grabcut = args.use_grabcut
        self.scale_mask = args.scale_mask
        self.mask_dir = args.mask_dir
        if (self.layer <= 0 or self.vgg_loss <= self.epsilon):
            self.vgg_loss = None
        self.pixel_loss = args.use_pixel_loss
        if (self.pixel_loss <= self.epsilon):
            self.pixel_loss = None
        self.mssim_loss = args.use_mssim_loss
        if (self.mssim_loss <= self.epsilon):
            self.mssim_loss = None
        self.lpips_loss = args.use_lpips_loss
        if (self.lpips_loss <= self.epsilon):
            self.lpips_loss = None
        self.l1_penalty = args.use_l1_penalty
        if (self.l1_penalty <= self.epsilon):
            self.l1_penalty = None
        self.adaptive_loss = args.use_adaptive_loss
        self.sharpen_input = args.sharpen_input
        self.batch_size = batch_size
        # LPIPS model is only kept if its loss weight is enabled.
        if perc_model is not None and self.lpips_loss is not None:
            self.perc_model = perc_model
        else:
            self.perc_model = None
        # Graph variables; created later in build_perceptual_model().
        self.ref_img = None
        self.ref_weight = None
        self.perceptual_model = None
        self.ref_img_features = None
        self.features_weight = None
        self.loss = None
        self.discriminator_loss = args.use_discriminator_loss
        if (self.discriminator_loss <= self.epsilon):
            self.discriminator_loss = None
        if self.discriminator_loss is not None:
            self.discriminator = None
            self.stub = create_stub(batch_size)
        if self.face_mask:
            # dlib is only required when face masking is enabled.
            import dlib
            self.detector = dlib.get_frontal_face_detector()
            LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
            landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
                                                       LANDMARKS_MODEL_URL, cache_subdir='temp'))
            self.predictor = dlib.shape_predictor(landmarks_model_path)
    def add_placeholder(self, var_name):
        """Create <var>_placeholder and <var>_op so the variable named
        var_name can later be overwritten via feed_dict."""
        var_val = getattr(self, var_name)
        setattr(self, var_name + "_placeholder", tf.placeholder(var_val.dtype, shape=var_val.get_shape()))
        setattr(self, var_name + "_op", var_val.assign(getattr(self, var_name + "_placeholder")))
    def assign_placeholder(self, var_name, var_val):
        """Run the assign op created by add_placeholder for var_name."""
        self.sess.run(getattr(self, var_name + "_op"), {getattr(self, var_name + "_placeholder"): var_val})
    def build_perceptual_model(self, generator, discriminator=None):
        """Assemble self.loss from all enabled terms over the generator output."""
        # Learning rate
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
        incremented_global_step = tf.assign_add(global_step, 1)
        self._reset_global_step = tf.assign(global_step, 0)
        self.learning_rate = tf.train.exponential_decay(self.lr, incremented_global_step,
                self.decay_steps, self.decay_rate, staircase=True)
        self.sess.run([self._reset_global_step])
        if self.discriminator_loss is not None:
            self.discriminator = discriminator
        generated_image_tensor = generator.generated_image
        # Resize generator output to the comparison resolution.
        generated_image = tf.image.resize_nearest_neighbor(generated_image_tensor,
                                                           (self.img_size, self.img_size), align_corners=True)
        self.ref_img = tf.get_variable('ref_img', shape=generated_image.shape,
                                       dtype='float32', initializer=tf.initializers.zeros())
        self.ref_weight = tf.get_variable('ref_weight', shape=generated_image.shape,
                                          dtype='float32', initializer=tf.initializers.zeros())
        self.add_placeholder("ref_img")
        self.add_placeholder("ref_weight")
        if (self.vgg_loss is not None):
            vgg16 = VGG16(include_top=False, input_shape=(self.img_size, self.img_size, 3))
            self.perceptual_model = Model(vgg16.input, vgg16.layers[self.layer].output)
            generated_img_features = self.perceptual_model(preprocess_input(self.ref_weight * generated_image))
            self.ref_img_features = tf.get_variable('ref_img_features', shape=generated_img_features.shape,
                                                    dtype='float32', initializer=tf.initializers.zeros())
            self.features_weight = tf.get_variable('features_weight', shape=generated_img_features.shape,
                                                   dtype='float32', initializer=tf.initializers.zeros())
            # NOTE(review): features_weight.initializer is run twice; the second
            # entry was presumably meant to be ref_img_features.initializer.
            self.sess.run([self.features_weight.initializer, self.features_weight.initializer])
            self.add_placeholder("ref_img_features")
            self.add_placeholder("features_weight")
        if self.perc_model is not None and self.lpips_loss is not None:
            img1 = tflib.convert_images_from_uint8(self.ref_weight * self.ref_img, nhwc_to_nchw=True)
            img2 = tflib.convert_images_from_uint8(self.ref_weight * generated_image, nhwc_to_nchw=True)
        self.loss = 0
        # L1 loss on VGG16 features
        if (self.vgg_loss is not None):
            if self.adaptive_loss:
                self.loss += self.vgg_loss * tf_custom_adaptive_loss(self.features_weight * self.ref_img_features, self.features_weight * generated_img_features)
            else:
                self.loss += self.vgg_loss * tf_custom_logcosh_loss(self.features_weight * self.ref_img_features, self.features_weight * generated_img_features)
        # + logcosh loss on image pixels
        if (self.pixel_loss is not None):
            if self.adaptive_loss:
                self.loss += self.pixel_loss * tf_custom_adaptive_rgb_loss(self.ref_weight * self.ref_img, self.ref_weight * generated_image)
            else:
                self.loss += self.pixel_loss * tf_custom_logcosh_loss(self.ref_weight * self.ref_img, self.ref_weight * generated_image)
        # + MS-SIM loss on image pixels
        if (self.mssim_loss is not None):
            self.loss += self.mssim_loss * tf.math.reduce_mean(1-tf.image.ssim_multiscale(self.ref_weight * self.ref_img, self.ref_weight * generated_image, 1))
        # + extra perceptual loss on image pixels
        if self.perc_model is not None and self.lpips_loss is not None:
            self.loss += self.lpips_loss * tf.math.reduce_mean(self.perc_model.get_output_for(img1, img2))
        # + L1 penalty on dlatent weights
        if self.l1_penalty is not None:
            self.loss += self.l1_penalty * 512 * tf.math.reduce_mean(tf.math.abs(generator.dlatent_variable-generator.get_dlatent_avg()))
        # discriminator loss (realism)
        if self.discriminator_loss is not None:
            self.loss += self.discriminator_loss * tf.math.reduce_mean(self.discriminator.get_output_for(tflib.convert_images_from_uint8(generated_image_tensor, nhwc_to_nchw=True), self.stub))
            # - discriminator_network.get_output_for(tflib.convert_images_from_uint8(ref_img, nhwc_to_nchw=True), stub)
    def generate_face_mask(self, im):
        """Return a binary face mask for image `im` using dlib landmarks,
        optionally refined with GrabCut. Returns inside the loop, so only
        the first detected face is used; returns None if no face is found.
        """
        from imutils import face_utils
        import cv2
        rects = self.detector(im, 1)
        # loop over the face detections
        for (j, rect) in enumerate(rects):
            """
            Determine the facial landmarks for the face region, then convert the facial landmark (x, y)-coordinates to a NumPy array
            """
            shape = self.predictor(im, rect)
            shape = face_utils.shape_to_np(shape)
            # we extract the face
            vertices = cv2.convexHull(shape)
            mask = np.zeros(im.shape[:2],np.uint8)
            cv2.fillConvexPoly(mask, vertices, 1)
            if self.use_grabcut:
                bgdModel = np.zeros((1,65),np.float64)
                fgdModel = np.zeros((1,65),np.float64)
                # NOTE(review): im.shape[2] is the channel count (likely 3);
                # a full-image rect was probably meant (im.shape[1], im.shape[0]).
                rect = (0,0,im.shape[1],im.shape[2])
                (x,y),radius = cv2.minEnclosingCircle(vertices)
                center = (int(x),int(y))
                radius = int(radius*self.scale_mask)
                mask = cv2.circle(mask,center,radius,cv2.GC_PR_FGD,-1)
                cv2.fillConvexPoly(mask, vertices, cv2.GC_FGD)
                cv2.grabCut(im,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
                # Collapse GrabCut labels to a binary mask.
                mask = np.where((mask==2)|(mask==0),0,1)
            return mask
    def set_reference_images(self, images_list):
        """Load target images (and optional face masks), pad a short batch
        with zeros, and push everything into the graph variables."""
        assert(len(images_list) != 0 and len(images_list) <= self.batch_size)
        loaded_image = load_images(images_list, self.img_size, sharpen=self.sharpen_input)
        image_features = None
        if self.perceptual_model is not None:
            image_features = self.perceptual_model.predict_on_batch(preprocess_input(np.array(loaded_image)))
            weight_mask = np.ones(self.features_weight.shape)
        if self.face_mask:
            image_mask = np.zeros(self.ref_weight.shape)
            for (i, im) in enumerate(loaded_image):
                try:
                    _, img_name = os.path.split(images_list[i])
                    mask_img = os.path.join(self.mask_dir, f'{img_name}')
                    if (os.path.isfile(mask_img)):
                        # Reuse a previously computed mask from disk.
                        print("Loading mask " + mask_img)
                        imask = PIL.Image.open(mask_img).convert('L')
                        mask = np.array(imask)/255
                        mask = np.expand_dims(mask,axis=-1)
                    else:
                        mask = self.generate_face_mask(im)
                        imask = (255*mask).astype('uint8')
                        imask = PIL.Image.fromarray(imask, 'L')
                        print("Saving mask " + mask_img)
                        imask.save(mask_img, 'PNG')
                        mask = np.expand_dims(mask,axis=-1)
                    mask = np.ones(im.shape,np.float32) * mask
                except Exception as e:
                    # Fall back to an all-ones (no-op) mask on any failure.
                    print("Exception in mask handling for " + mask_img)
                    traceback.print_exc()
                    mask = np.ones(im.shape[:2],np.uint8)
                    mask = np.ones(im.shape,np.float32) * np.expand_dims(mask,axis=-1)
                image_mask[i] = mask
            img = None
        else:
            image_mask = np.ones(self.ref_weight.shape)
        if len(images_list) != self.batch_size:
            # Zero-pad features/weights/images so shapes match the batch size.
            if image_features is not None:
                features_space = list(self.features_weight.shape[1:])
                existing_features_shape = [len(images_list)] + features_space
                empty_features_shape = [self.batch_size - len(images_list)] + features_space
                existing_examples = np.ones(shape=existing_features_shape)
                empty_examples = np.zeros(shape=empty_features_shape)
                weight_mask = np.vstack([existing_examples, empty_examples])
                image_features = np.vstack([image_features, np.zeros(empty_features_shape)])
            images_space = list(self.ref_weight.shape[1:])
            existing_images_space = [len(images_list)] + images_space
            empty_images_space = [self.batch_size - len(images_list)] + images_space
            existing_images = np.ones(shape=existing_images_space)
            empty_images = np.zeros(shape=empty_images_space)
            image_mask = image_mask * np.vstack([existing_images, empty_images])
            loaded_image = np.vstack([loaded_image, np.zeros(empty_images_space)])
        if image_features is not None:
            self.assign_placeholder("features_weight", weight_mask)
            self.assign_placeholder("ref_img_features", image_features)
        self.assign_placeholder("ref_weight", image_mask)
        self.assign_placeholder("ref_img", loaded_image)
    def optimize(self, vars_to_optimize, iterations=200, use_optimizer='adam'):
        """Generator that runs `iterations` optimization steps on self.loss,
        yielding a dict with the current loss (and lr for non-lbfgs)."""
        vars_to_optimize = vars_to_optimize if isinstance(vars_to_optimize, list) else [vars_to_optimize]
        if use_optimizer == 'lbfgs':
            optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, var_list=vars_to_optimize, method='L-BFGS-B', options={'maxiter': iterations})
        else:
            if use_optimizer == 'ggt':
                optimizer = tf.contrib.opt.GGTOptimizer(learning_rate=self.learning_rate)
            else:
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            min_op = optimizer.minimize(self.loss, var_list=[vars_to_optimize])
            self.sess.run(tf.variables_initializer(optimizer.variables()))
            fetch_ops = [min_op, self.loss, self.learning_rate]
        #min_op = optimizer.minimize(self.sess)
        #optim_results = tfp.optimizer.lbfgs_minimize(make_val_and_grad_fn(get_loss), initial_position=vars_to_optimize, num_correction_pairs=10, tolerance=1e-8)
        self.sess.run(self._reset_global_step)
        #self.sess.graph.finalize() # Graph is read-only after this statement.
        for _ in range(iterations):
            if use_optimizer == 'lbfgs':
                # NOTE(review): L-BFGS runs its full maxiter budget per call,
                # so this loop effectively multiplies the iteration count.
                optimizer.minimize(self.sess, fetches=[vars_to_optimize, self.loss])
                yield {"loss":self.loss.eval()}
            else:
                _, loss, lr = self.sess.run(fetch_ops)
                yield {"loss":loss,"lr":lr}
| 15,587 | 49.775244 | 192 | py |
stylegan-encoder | stylegan-encoder-master/encoder/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/setup.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
import sys
# Package metadata and install configuration for the `rocketqa` wheel.
long_description = "RocketQA development tools and examples, building on top of PaddlePaddle2.0."
# Runtime dependencies are read verbatim from requirements.txt.
with open("requirements.txt") as fin:
    REQUIRED_PACKAGES = fin.read()
setuptools.setup(
    name="rocketqa",
    version="1.1.1",
    author="dingyuchen",
    author_email="dingyuchen@baidu.com",
    description=long_description,
    long_description=long_description,
    long_description_content_type="text/plain",
    url="https://github.com/PaddlePaddle/RocketQA",
    # Exclude data/checkpoint/research folders from the distributed package.
    packages=setuptools.find_packages(
        where='.', exclude=('checkpoints*', 'research*', 'data*', 'inference_model*', 'examples*', 'dureader*')),
    setup_requires=['cython', 'numpy'],
    install_requires=REQUIRED_PACKAGES,
    python_requires='>=3.6',
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
    ],
    license='Apache 2.0')
| 1,737 | 36.782609 | 113 | py |
RocketQA | RocketQA-main/rocketqa/rocketqa.py | import os
import sys
import json
import paddle
import urllib
import numpy as np
import tarfile
import warnings
import hashlib
from tqdm import tqdm
from rocketqa.encoder.dual_encoder import DualEncoder
from rocketqa.encoder.cross_encoder import CrossEncoder
# RocketQA models are built as static graphs; silence noisy warnings.
paddle.enable_static()
warnings.simplefilter('ignore')
# Download URLs for the released RocketQA checkpoints, keyed by model name.
__MODELS = {
    "v1_marco_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/v1_marco_de.tar.gz", # RocketQA v1 dual-encoder trained on MSMARCO
    "v1_marco_ce": "http://rocketqa.bj.bcebos.com/RocketQAModels/v1_marco_ce.tar.gz", # RocketQA v1 cross-encoder trained on MSMARCO
    "v1_nq_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/v1_nq_de.tar.gz", # RocketQA v1 dual-encoder trained on Natural Question
    "v1_nq_ce": "http://rocketqa.bj.bcebos.com/RocketQAModels/v1_nq_ce.tar.gz", # RocketQA v1 cross-encoder trained on Natural Question
    "pair_marco_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/pair_marco_de.tar.gz", # PAIR dual-encoder trained on MSMARCO
    "pair_nq_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/pair_nq_de.tar.gz", # PAIR dual-encoder trained on Natural Question
    "v2_marco_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/v2_marco_de.tar.gz", # RocketQA v2 dual-encoder trained on MSMARCO
    "v2_marco_ce": "http://rocketqa.bj.bcebos.com/RocketQAModels/v2_marco_ce.tar.gz", # RocketQA v2 cross-encoder trained on MSMARCO
    "v2_nq_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/v2_nq_de.tar.gz", # RocketQA v2 dual-encoder trained on Natural Question
    "zh_dureader_de": "http://rocketqa.bj.bcebos.com/RocketQAModels/zh_dureader_de.tar.gz", # RocketQA zh dual-encoder trained on Dureader
    "zh_dureader_ce": "http://rocketqa.bj.bcebos.com/RocketQAModels/zh_dureader_ce.tar.gz", # RocketQA zh cross-encoder trained on Dureader
    "zh_dureader_de_v2": "http://rocketqa.bj.bcebos.com/RocketQAModels/zh_dureader_de_v2.tar.gz",
    "zh_dureader_ce_v2": "http://rocketqa.bj.bcebos.com/RocketQAModels/zh_dureader_ce_v2.tar.gz"
}
# Expected MD5 checksums used to verify downloaded model archives.
__MODELS_MD5 = {
    "v1_marco_de": "d8210e4080935bd7fdad7a394cd60b66",
    "v1_marco_ce": "caec5aedc46f22edd7107ecd793fc7fb",
    "v1_nq_de": "cfeb70f82087b8a47bb0d6d6cfcd61c5",
    "v1_nq_ce": "15aac78d70cc25994016b8a30d80f12c",
    "pair_marco_de": "b4080ffa2999525e5ba2aa1f4e03a9e8",
    "pair_nq_de": "d770bc379ec6def7e0588ec02c80ace2",
    "v2_marco_de": "4ce64ff35d1d831f0ca989e49abde227",
    "v2_marco_ce": "915ea7ff214a4a92a3a1e1d56c3fb469",
    "v2_nq_de": "8f177aa75cadaad6656dcd981edc983b",
    "zh_dureader_de": "39811675289c311236c667ad57ebd2d2",
    "zh_dureader_ce": "11caeb179febc5f0a55fa10ae3f2d123",
    "zh_dureader_de_v2": "889e62b0091bc350622549b57a2616ec",
    "zh_dureader_ce_v2": "552675c98c546e798a33cc84325921f6"
}
def available_models():
    """
    Return a view of the names of the built-in RocketQA models
    """
    model_index = __MODELS
    return model_index.keys()
def load_model(model, use_cuda=False, device_id=0, batch_size=1):
    """
    Load a RocketQA model or an user-specified checkpoint
    Args:
        model: A model name return by `rocketqa.available_models()` or the path of an user-specified checkpoint config
        use_cuda: Whether to use GPU
        device_id: The device to put the model
        batch_size: Batch_size during inference
    Returns:
        model
    """
    model_type = ''
    model_name = ''
    rocketqa_model = False
    encoder_conf = {}
    # Built-in model: resolve from ~/.rocketqa/, downloading if missing.
    if model in __MODELS:
        model_name = model
        print (f"RocketQA model [{model_name}]", file=sys.stderr)
        rocketqa_model = True
        model_path = os.path.expanduser('~/.rocketqa/') + model_name + '/'
        if not os.path.exists(model_path):
            if __download(model_name) is False:
                raise Exception(f"RocketQA model [{model_name}] download failed, \
                        please check model dir [{model_path}]")
        encoder_conf['conf_path'] = model_path + 'config.json'
        encoder_conf['model_path'] = model_path
        # Encoder type is inferred from the model-name suffix.
        # NOTE(review): a built-in name without "_de"/"_ce" would leave
        # model_type empty and crash at model_type[0] below.
        if model_name.find("_de") >= 0:
            model_type = 'dual_encoder'
        elif model_name.find("_ce") >= 0:
            model_type = 'cross_encoder'
    # Otherwise `model` is treated as a path to a user config JSON.
    if rocketqa_model is False:
        print ("User-specified model", file=sys.stderr)
        conf_path = model
        model_name = model
        if not os.path.isfile(conf_path):
            raise Exception(f"Config file [{conf_path}] not found")
        try:
            with open(conf_path, 'r', encoding='utf8') as json_file:
                config_dict = json.load(json_file)
        except Exception as e:
            raise Exception(str(e) + f"\nConfig file [{conf_path}] load failed")
        encoder_conf['conf_path'] = conf_path
        # Model files are expected to live in the config file's directory.
        split_p = conf_path.rfind('/')
        if split_p > 0:
            encoder_conf['model_path'] = conf_path[0:split_p + 1]
        if "model_type" not in config_dict:
            raise Exception("[model_type] not found in config file")
        model_type = config_dict["model_type"]
        if model_type != "dual_encoder" and model_type != "cross_encoder":
            raise Exception("model_type [model_type] is illegal, must be `dual_encoder` or `cross_encoder`")
    encoder_conf["use_cuda"] = use_cuda
    encoder_conf["device_id"] = device_id
    encoder_conf["batch_size"] = batch_size
    encoder_conf["model_name"] = model_name
    # Dispatch on the first letter: "d"ual vs "c"ross encoder.
    if model_type[0] == "d":
        encoder = DualEncoder(**encoder_conf)
    elif model_type[0] == "c":
        encoder = CrossEncoder(**encoder_conf)
    print ("Load model done", file=sys.stderr)
    return encoder
def __download(model_name):
    """Download (if absent), md5-verify, and extract a built-in model archive
    under ~/.rocketqa/. Returns True on success, False if extraction fails;
    raises if the archive's md5 does not match the published checksum.
    """
    os.makedirs(os.path.expanduser('~/.rocketqa/'), exist_ok=True)
    filename = model_name + '.tar.gz'
    download_dst = os.path.join(os.path.expanduser('~/.rocketqa/') + filename)
    download_url = __MODELS[model_name]
    # Skip download when a previously fetched archive is present.
    if not os.path.exists(download_dst):
        print (f"Download RocketQA model [{model_name}]", file=sys.stderr)
        with urllib.request.urlopen(download_url) as source, open(download_dst, "wb") as output:
            # Stream in 8 KiB chunks with a tqdm progress bar.
            with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
                while True:
                    buffer = source.read(8192)
                    if not buffer:
                        break
                    output.write(buffer)
                    loop.update(len(buffer))
    # NOTE(review): a corrupted partial download stays on disk after this
    # raise, so subsequent calls will keep failing until it is deleted.
    file_md5= __get_file_md5(download_dst)
    if file_md5 != __MODELS_MD5[model_name]:
        raise Exception(f"Model file [{download_dst}] exists, but md5 doesnot match")
    try:
        # NOTE(review): the tarfile handle is not closed explicitly.
        t = tarfile.open(download_dst)
        t.extractall(os.path.expanduser('~/.rocketqa/'))
    except Exception as e:
        print (str(e), file=sys.stderr)
        return False
    return True
def __get_file_md5(fname):
    """Return the hex MD5 digest of the file at fname, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
# No CLI behavior: importing this module is the supported entry point.
if __name__ == '__main__':
    pass
| 7,200 | 40.866279 | 151 | py |
RocketQA | RocketQA-main/rocketqa/__init__.py | # Copyright (c) 2021 RocketQA Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '1.1.0'
from .rocketqa import load_model
from .rocketqa import available_models
| 702 | 36 | 74 | py |
RocketQA | RocketQA-main/rocketqa/encoder/dual_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import json
import math
import logging
import time
import multiprocessing
import numpy as np
from tqdm import tqdm, trange
import shutil
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
from rocketqa.reader import reader_de_predict, reader_de_train
from rocketqa.model.ernie import ErnieConfig
from rocketqa.model.dual_encoder_predict import create_predict_model
from rocketqa.model.dual_encoder_train import create_train_model
from rocketqa.utils.args import print_arguments, check_cuda, prepare_logger
from rocketqa.utils.init import init_pretraining_params, init_checkpoint
from rocketqa.utils.finetune_args import parser
from rocketqa.utils.optimization import optimization
class DualEncoder(object):
    def __init__(self, conf_path, use_cuda=False, device_id=0, batch_size=1, **kwargs):
        """Build the dual-encoder inference graph and load its checkpoint.

        conf_path is the model config JSON; optional kwargs: model_path
        (prefix for paths in the config) and model_name (used to namespace
        the pyreader; '/' is replaced since it is illegal in op names).
        """
        if "model_path" in kwargs:
            args = self._parse_args(conf_path, model_path=kwargs["model_path"])
        else:
            args = self._parse_args(conf_path)
        if "model_name" in kwargs:
            args.model_name = kwargs["model_name"].replace('/', '-')
        else:
            args.model_name = "my_de"
        args.use_cuda = use_cuda
        self.ernie_config = ErnieConfig(args.ernie_config_path)
        args.batch_size = batch_size
        # Choose the execution place: one GPU, or CPU with CPU_NUM threads.
        if args.use_cuda:
            dev_list = fluid.cuda_places()
            place = dev_list[device_id]
            dev_count = 1
        else:
            place = fluid.CPUPlace()
            dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
        self.exe = fluid.Executor(place)
        # Reader that tokenizes "query\ttitle\tpara" lines for prediction.
        self.predict_reader = reader_de_predict.DEPredictorReader(
            vocab_path=args.vocab_path,
            label_map_config=args.label_map_config,
            q_max_seq_len=args.q_max_seq_len,
            p_max_seq_len=args.p_max_seq_len,
            do_lower_case=args.do_lower_case,
            in_tokens=args.in_tokens,
            random_seed=args.random_seed,
            tokenizer=args.tokenizer,
            for_cn=args.for_cn,
            task_id=args.task_id)
        self.startup_prog = fluid.Program()
        self.test_prog = fluid.Program()
        with fluid.program_guard(self.test_prog, self.startup_prog):
            with fluid.unique_name.guard():
                self.test_pyreader, self.graph_vars = create_predict_model(
                    args,
                    pyreader_name=args.model_name + '_test_reader',
                    ernie_config=self.ernie_config,
                    is_prediction=True,
                    share_parameter=args.share_parameter)
        self.test_prog = self.test_prog.clone(for_test=True)
        self.exe.run(self.startup_prog)
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        # Load the pretrained parameters into the startup program.
        init_pretraining_params(
            self.exe,
            args.init_checkpoint,
            main_program=self.startup_prog)
        self.args = args
def _parse_args(self, conf_path, model_path=''):
args, unknown = parser.parse_known_args()
with open(conf_path, 'r', encoding='utf8') as json_file:
config_dict = json.load(json_file)
self.config_dict = config_dict
args.do_train = False
args.do_val = False
args.do_test = True
args.use_fast_executor = True
args.q_max_seq_len = config_dict['q_max_seq_len']
args.p_max_seq_len = config_dict['p_max_seq_len']
args.ernie_config_path = model_path + config_dict['model_conf_path']
args.vocab_path = model_path + config_dict['model_vocab_path']
args.init_checkpoint = model_path + config_dict['model_checkpoint_path']
if "for_cn" in config_dict:
args.for_cn = config_dict["for_cn"]
if 'share_parameter' in config_dict:
args.share_parameter = config_dict['share_parameter']
else:
args.share_parameter = 0
return args
def _parse_train_args(self, train_set, epoch, save_model_path, config_dict):
self.args.train_set = train_set
self.args.save_model_path = save_model_path
self.args.epoch = epoch
if "save_steps" in config_dict:
self.args.save_steps = config_dict['save_steps']
else:
self.args.save_steps = 0
if "batch_size" in config_dict:
self.args.batch_size = config_dict['batch_size']
if 'learning_rate' in config_dict:
self.args.learning_rate = config_dict['learning_rate']
else:
self.args.learning_rate = 2e-5
if 'log_folder' in config_dict:
self.args.log_folder = config_dict['log_folder']
    def encode_query(self, query):
        """Yield one embedding (numpy array) per query string.

        Each query is packed into the reader's "query\ttitle\tpara" format
        with dummy title/para fields; only the query representation is
        fetched from the graph.
        """
        data = []
        for q in query:
            data.append(q + '\t-\t-')
        self.test_pyreader.decorate_tensor_provider(
            self.predict_reader.data_generator(
                data,
                self.args.batch_size,
                shuffle=False))
        self.test_pyreader.start()
        fetch_list = [self.graph_vars["q_rep"]]
        # Drain batches until the pyreader signals end-of-data.
        while True:
            try:
                q_rep = self.exe.run(program=self.test_prog,
                                     fetch_list=fetch_list)
                for data_q_rep in q_rep[0]:
                    yield data_q_rep
            except fluid.core.EOFException:
                self.test_pyreader.reset()
                break
        return
    def encode_para(self, para, title=[]):
        """Yield one embedding (numpy array) per paragraph.

        Lines are packed as "query\ttitle\tpara" with a dummy query; only
        the paragraph representation is fetched.
        NOTE(review): mutable default `title=[]` is shared between calls —
        harmless here since it is only read, but a tuple would be safer.
        """
        data = []
        if len(title) != 0:
            assert (len(para) == len(title)), "The input para(List) and title(List) should be the same length"
            for t, p in zip(title, para):
                data.append('-\t' + t + '\t' + p)
        else:
            for p in para:
                data.append('-\t\t' + p)
        self.test_pyreader.decorate_tensor_provider(
            self.predict_reader.data_generator(
                data,
                self.args.batch_size,
                shuffle=False))
        self.test_pyreader.start()
        fetch_list = [self.graph_vars["p_rep"]]
        # while True:
        # Iterate batch-by-batch so tqdm can display progress over `para`.
        for idx in trange(0, len(para), self.args.batch_size, desc='encode para'):
            try:
                p_rep = self.exe.run(program=self.test_prog,
                                     fetch_list=fetch_list)
                for data_p_rep in p_rep[0]:
                    yield data_p_rep
            except fluid.core.EOFException:
                self.test_pyreader.reset()
                break
        self.test_pyreader.reset()
        return
    def matching(self, query, para, title=[]):
        """Yield a relevance score for each aligned (query, para[, title]) pair.

        Inputs are packed as "query\ttitle\tpara" lines (empty title when
        none is given) and the dual-encoder similarity is fetched.
        NOTE(review): mutable default `title=[]` is shared between calls —
        harmless here since it is only read.
        """
        data = []
        assert (len(para) == len(query)), "The input query(List) and para(List) should be the same length"
        if len(title) != 0:
            assert (len(para) == len(title)), "The input query(List) and para(List) should be the same length"
            for q, t, p in zip(query, title, para):
                data.append(q + '\t' + t + '\t' + p)
        else:
            for q, p in zip(query, para):
                data.append(q + '\t\t' + p)
        self.test_pyreader.decorate_tensor_provider(
            self.predict_reader.data_generator(
                data,
                self.args.batch_size,
                shuffle=False))
        self.test_pyreader.start()
        fetch_list = [self.graph_vars["probs"]]
        inner_probs = []
        # Drain batches until the pyreader signals end-of-data.
        while True:
            try:
                probs = self.exe.run(program=self.test_prog,
                                     fetch_list=fetch_list)
                #inner_probs.extend(probs[0].tolist())
                for data_prob in probs[0].tolist():
                    yield data_prob
            except fluid.core.EOFException:
                self.test_pyreader.reset()
                break
        return
    def train(self, train_set, epoch, save_model_path, **kwargs):
        """Fine-tune the dual encoder on a TSV training set.

        Builds the Fluid train program, restores pretrained parameters from
        ``self.args.init_checkpoint``, runs the training loop, and
        periodically checkpoints (persistables + config + vocab) into
        ``save_model_path``.

        Args:
            train_set: path to the training TSV file.
            epoch: number of passes over the training data.
            save_model_path: directory for checkpoints and config copies.
            **kwargs: optional overrides (save_steps, batch_size,
                learning_rate, log_folder) consumed by _parse_train_args.
        """
        self._parse_train_args(train_set, epoch, save_model_path, kwargs)
        check_cuda(self.args.use_cuda)
        log = logging.getLogger()
        dev_count = 1
        if self.args.log_folder == '':
            self.args.log_folder = '.'
        if not os.path.exists(self.args.log_folder):
            os.makedirs(self.args.log_folder)
        prepare_logger(log, save_to_file=self.args.log_folder + '/log.train')
        print_arguments(self.args, log)
        reader = reader_de_train.DETrainReader(
            vocab_path=self.args.vocab_path,
            label_map_config=self.args.label_map_config,
            q_max_seq_len=self.args.q_max_seq_len,
            p_max_seq_len=self.args.p_max_seq_len,
            total_num=self.args.train_data_size,
            do_lower_case=self.args.do_lower_case,
            in_tokens=self.args.in_tokens,
            random_seed=self.args.random_seed,
            tokenizer=self.args.tokenizer,
            for_cn=self.args.for_cn,
            task_id=self.args.task_id)
        startup_prog = fluid.Program()
        if self.args.random_seed is not None:
            startup_prog.random_seed = self.args.random_seed
        train_data_generator = reader.data_generator(
            input_file=self.args.train_set,
            batch_size=self.args.batch_size,
            epoch=self.args.epoch,
            dev_count=dev_count,
            shuffle=True,
            phase="train")
        num_train_examples = reader.get_num_examples(self.args.train_set)
        # Default: checkpoint twice per full training run.
        if self.args.save_steps == 0:
            self.args.save_steps = int(math.ceil(num_train_examples * self.args.epoch / self.args.batch_size / 2))
        max_train_steps = self.args.epoch * num_train_examples // self.args.batch_size // dev_count
        warmup_steps = int(max_train_steps * self.args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        log.info("Learning rate: %f" % self.args.learning_rate)
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars = create_train_model(
                    self.args,
                    pyreader_name='train_reader',
                    ernie_config=self.ernie_config,
                    batch_size=self.args.batch_size)
                # optimization() wires the LR schedule + weight decay into
                # train_program and returns the scheduled LR variable.
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=self.args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=self.args.weight_decay,
                    scheduler=self.args.lr_scheduler,
                    use_dynamic_loss_scaling=self.args.use_dynamic_loss_scaling,
                    incr_every_n_steps=self.args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=self.args.decr_every_n_nan_or_inf,
                    incr_ratio=self.args.incr_ratio,
                    decr_ratio=self.args.decr_ratio)
        self.exe.run(startup_prog)
        init_pretraining_params(
            self.exe,
            self.args.init_checkpoint,
            main_program=startup_prog)
        train_pyreader.decorate_tensor_provider(train_data_generator)
        train_pyreader.start()
        steps = 0
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        time_begin = time.time()
        last_epoch = 0
        current_epoch = 0
        total_loss = []
        # Loop until the pyreader raises EOFException (data exhausted).
        while True:
            try:
                steps += 1
                if steps % self.args.skip_steps != 0:
                    # Non-logging step: run without fetching to avoid sync cost.
                    self.exe.run(fetch_list=[], program=train_program)
                else:
                    time_end = time.time()
                    used_time = time_end - time_begin
                    current_example, current_epoch = reader.get_train_progress()
                    train_fetch_list = [
                        graph_vars["loss"], graph_vars["accuracy"]
                    ]
                    outputs = self.exe.run(fetch_list=train_fetch_list, program=train_program)
                    tmp_loss = np.mean(outputs[0])
                    tmp_acc = np.mean(outputs[1])
                    total_loss.append(tmp_loss)
                    if self.args.verbose:
                        verbose = "train pyreader queue size: %d, " % train_pyreader.queue.size(
                        )
                        # NOTE(review): `outputs` is a list (indexed by
                        # position of train_fetch_list), so
                        # outputs["learning_rate"] raises TypeError whenever
                        # verbose is on and warmup_steps > 0 — the scheduled
                        # LR is never added to the fetch list. Confirm and fix.
                        verbose += "learning rate: %f" % (
                            outputs["learning_rate"]
                            if warmup_steps > 0 else self.args.learning_rate)
                        log.info(verbose)
                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, np.mean(total_loss), tmp_acc,
                         self.args.skip_steps / used_time))
                    time_begin = time.time()
                if steps % self.args.save_steps == 0:
                    # Periodic checkpoint: weights + config + vocab so the
                    # saved directory is directly loadable as a model_path.
                    save_path = os.path.join(self.args.save_model_path,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(self.exe, save_path, train_program)
                    config_save_path = os.path.join(self.args.save_model_path, "config.json")
                    json.dump(self.config_dict, open(config_save_path, "w"))
                    shutil.copy(self.args.ernie_config_path, self.args.save_model_path)
                    shutil.copy(self.args.vocab_path, self.args.save_model_path)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save a final checkpoint and stop.
                save_path = os.path.join(self.args.save_model_path, "step_" + str(steps))
                fluid.io.save_persistables(self.exe, save_path, train_program)
                config_save_path = os.path.join(self.args.save_model_path, "config.json")
                json.dump(self.config_dict, open(config_save_path, "w"))
                shutil.copy(self.args.ernie_config_path, self.args.save_model_path)
                shutil.copy(self.args.vocab_path, self.args.save_model_path)
                train_pyreader.reset()
                break
| 15,636 | 38.190476 | 114 | py |
RocketQA | RocketQA-main/rocketqa/encoder/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/rocketqa/encoder/cross_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import json
import math
import multiprocessing
import random
import numpy as np
import logging
import time
import shutil
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
from rocketqa.reader import reader_ce_predict, reader_ce_train
from rocketqa.model.ernie import ErnieConfig
from rocketqa.model.cross_encoder_predict import create_predict_model
from rocketqa.model.cross_encoder_train import create_train_model
from rocketqa.utils.args import print_arguments, check_cuda, prepare_logger
from rocketqa.utils.init import init_pretraining_params
from rocketqa.utils.finetune_args import parser
from rocketqa.utils.optimization import optimization
class CrossEncoder(object):
    """Cross-encoder reranker built on ERNIE / Paddle Fluid.

    Constructs a prediction program at init time (restored from a
    pretrained checkpoint) and exposes `matching` for scoring
    query/paragraph pairs and `train` for fine-tuning.
    """
    def __init__(self, conf_path, use_cuda=False, device_id=0, batch_size=1, **kwargs):
        """Build the predict program and load pretrained parameters.

        Args:
            conf_path: path to the JSON model config.
            use_cuda: run on GPU `device_id` when True, else CPU.
            device_id: CUDA device index (used only when use_cuda).
            batch_size: prediction batch size.
            **kwargs: optional 'model_path' (prefix for config-relative
                paths) and 'model_name' (namespacing for the pyreader).
        """
        if "model_path" in kwargs:
            args = self._parse_args(conf_path, model_path=kwargs["model_path"])
        else:
            args = self._parse_args(conf_path)
        if "model_name" in kwargs:
            args.model_name = kwargs["model_name"].replace('/', '-')
        else:
            args.model_name = "my_ce"
        args.use_cuda = use_cuda
        args.batch_size = batch_size
        self.ernie_config = ErnieConfig(args.ernie_config_path)
        if use_cuda:
            dev_list = fluid.cuda_places()
            place = dev_list[device_id]
            dev_count = 1
        else:
            place = fluid.CPUPlace()
            dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
        # NOTE(review): this Executor is recreated below after program
        # construction; the first instance appears redundant — confirm.
        self.exe = fluid.Executor(place)
        self.predict_reader = reader_ce_predict.CEPredictorReader(
            vocab_path=args.vocab_path,
            label_map_config=args.label_map_config,
            max_seq_len=args.max_seq_len,
            total_num=args.train_data_size,
            do_lower_case=args.do_lower_case,
            in_tokens=args.in_tokens,
            random_seed=args.random_seed,
            tokenizer=args.tokenizer,
            for_cn=args.for_cn,
            task_id=args.task_id)
        self.startup_prog = fluid.Program()
        if args.random_seed is not None:
            self.startup_prog.random_seed = args.random_seed
        self.test_prog = fluid.Program()
        with fluid.program_guard(self.test_prog, self.startup_prog):
            with fluid.unique_name.guard():
                self.test_pyreader, self.graph_vars = create_predict_model(
                    args,
                    pyreader_name=args.model_name + '_test_reader',
                    ernie_config=self.ernie_config,
                    is_prediction=True,
                    joint_training=self.joint_training)
        self.test_prog = self.test_prog.clone(for_test=True)
        self.exe = fluid.Executor(place)
        self.exe.run(self.startup_prog)
        if not args.init_checkpoint:
            # NOTE(review): missing space between the two string fragments —
            # renders as "...should be set ifonly doing validation...".
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_pretraining_params(
            self.exe,
            args.init_checkpoint,
            main_program=self.startup_prog)
        self.args = args
    def _parse_args(self, conf_path, model_path=''):
        """Load the JSON config and fold it into the argparse namespace.

        Also sets self.joint_training (0 when absent from the config) and
        stashes the raw config dict for later checkpointing.
        """
        args, unknown = parser.parse_known_args()
        with open(conf_path, 'r', encoding='utf8') as json_file:
            config_dict = json.load(json_file)
        self.config_dict = config_dict
        args.do_train = False
        args.do_val = False
        args.do_test = True
        args.use_fast_executor = True
        args.max_seq_len = config_dict['max_seq_len']
        args.ernie_config_path = model_path + config_dict['model_conf_path']
        args.vocab_path = model_path + config_dict['model_vocab_path']
        args.init_checkpoint = model_path + config_dict['model_checkpoint_path']
        if "for_cn" in config_dict:
            args.for_cn = config_dict["for_cn"]
        if "joint_training" in config_dict:
            self.joint_training = config_dict['joint_training']
        else:
            self.joint_training = 0
        return args
    def _parse_train_args(self, train_set, epoch, save_model_path, config_dict):
        """Apply train-time settings (with defaults) onto self.args."""
        self.args.train_set = train_set
        self.args.save_model_path = save_model_path
        self.args.epoch = epoch
        if "save_steps" in config_dict:
            self.args.save_steps = config_dict['save_steps']
        else:
            # 0 means "auto": train() derives a value from the data size.
            self.args.save_steps = 0
        if "batch_size" in config_dict:
            self.args.batch_size = config_dict['batch_size']
        if 'learning_rate' in config_dict:
            self.args.learning_rate = config_dict['learning_rate']
        else:
            self.args.learning_rate = 2e-5
        if 'log_folder' in config_dict:
            self.args.log_folder = config_dict['log_folder']
    def matching(self, query, para, title=[]):
        """Yield one relevance probability per (query, para[, title]) pair.

        NOTE(review): `title=[]` is a mutable default argument; harmless
        here (never mutated) but worth switching to None.
        """
        assert len(para) == len(query)
        data = []
        if len(title) != 0:
            assert len(para) == len(title)
            for q, t, p in zip(query, title, para):
                data.append(q + '\t' + t + '\t' + p)
        else:
            for q, p in zip(query, para):
                data.append(q + '\t\t' + p)
        self.test_pyreader.decorate_tensor_provider(
            self.predict_reader.data_generator(
                data,
                batch_size=self.args.batch_size,
                shuffle=False))
        self.test_pyreader.start()
        fetch_list = [self.graph_vars["probs"].name]
        while True:
            try:
                fetch_result = self.exe.run(program=self.test_prog,
                                            fetch_list=fetch_list)
                np_probs = fetch_result[0]
                if self.joint_training == 0:
                    # Two-class softmax output: column 1 is "relevant".
                    for data_prob in np_probs[:, 1].reshape(-1).tolist():
                        yield data_prob
                else:
                    # Joint-training head emits a single score per pair.
                    for data_prob in np_probs.reshape(-1).tolist():
                        yield data_prob
            except fluid.core.EOFException:
                self.test_pyreader.reset()
                break
        return
    def train(self, train_set, epoch, save_model_path, **kwargs):
        """Fine-tune the cross encoder on a TSV training set.

        Builds the train program, restores pretrained parameters, runs the
        loop, and periodically checkpoints (persistables + config + vocab)
        into save_model_path.
        """
        self._parse_train_args(train_set, epoch, save_model_path, kwargs)
        args = self.args
        check_cuda(args.use_cuda)
        log = logging.getLogger()
        if self.args.log_folder == '':
            self.args.log_folder = '.'
        if not os.path.exists(self.args.log_folder):
            os.makedirs(self.args.log_folder)
        prepare_logger(log, save_to_file=self.args.log_folder + '/log.train')
        print_arguments(args, log)
        dev_count = 1
        reader = reader_ce_train.CETrainReader(
            vocab_path=args.vocab_path,
            label_map_config=args.label_map_config,
            max_seq_len=args.max_seq_len,
            total_num=args.train_data_size,
            do_lower_case=args.do_lower_case,
            in_tokens=args.in_tokens,
            random_seed=args.random_seed,
            tokenizer=args.tokenizer,
            for_cn=args.for_cn,
            task_id=args.task_id)
        startup_prog = fluid.Program()
        if args.random_seed is not None:
            startup_prog.random_seed = args.random_seed
        train_data_generator = reader.data_generator(
            input_file=args.train_set,
            batch_size=args.batch_size,
            epoch=args.epoch,
            dev_count=dev_count,
            shuffle=True,
            phase="train")
        num_train_examples = reader.get_num_examples(args.train_set)
        # Default: checkpoint twice per full training run.
        if self.args.save_steps == 0:
            self.args.save_steps = int(math.ceil(num_train_examples * self.args.epoch / self.args.batch_size / 2))
        max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count
        warmup_steps = int(max_train_steps * args.warmup_proportion)
        log.info("Device count: %d" % dev_count)
        log.info("Num train examples: %d" % num_train_examples)
        log.info("Max train steps: %d" % max_train_steps)
        log.info("Num warmup steps: %d" % warmup_steps)
        log.info("Learning rate: %f" % self.args.learning_rate)
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_pyreader, graph_vars = create_train_model(
                    args,
                    pyreader_name='train_reader',
                    ernie_config=self.ernie_config)
                scheduled_lr = optimization(
                    loss=graph_vars["loss"],
                    warmup_steps=warmup_steps,
                    num_train_steps=max_train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    scheduler=args.lr_scheduler,
                    use_dynamic_loss_scaling=args.use_dynamic_loss_scaling,
                    incr_every_n_steps=args.incr_every_n_steps,
                    decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
                    incr_ratio=args.incr_ratio,
                    decr_ratio=args.decr_ratio)
        if args.verbose:
            # Estimate graph memory usage for logging only.
            if args.in_tokens:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program,
                    batch_size=args.batch_size // args.max_seq_len)
            else:
                lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
                    program=train_program, batch_size=args.batch_size)
            log.info("Theoretical memory usage in training: %.3f - %.3f %s" %
                     (lower_mem, upper_mem, unit))
        self.exe.run(startup_prog)
        init_pretraining_params(
            self.exe,
            args.init_checkpoint,
            main_program=startup_prog)
        train_pyreader.decorate_tensor_provider(train_data_generator)
        train_pyreader.start()
        if warmup_steps > 0:
            graph_vars["learning_rate"] = scheduled_lr
        steps = 0
        time_begin = time.time()
        current_epoch = 0
        last_epoch = 0
        total_loss = []
        # Loop until the pyreader raises EOFException (data exhausted).
        while True:
            try:
                steps += 1
                if steps % args.skip_steps != 0:
                    # Non-logging step: run without fetching to avoid sync cost.
                    self.exe.run(fetch_list=[], program=train_program)
                else:
                    current_example, current_epoch = reader.get_train_progress()
                    time_end = time.time()
                    used_time = time_end - time_begin
                    train_fetch_list = [
                        graph_vars["loss"], graph_vars["accuracy"]
                    ]
                    outputs = self.exe.run(fetch_list=train_fetch_list, program=train_program)
                    tmp_loss = np.mean(outputs[0])
                    tmp_acc = np.mean(outputs[1])
                    total_loss.append(tmp_loss)
                    log.info(
                        "epoch: %d, progress: %d/%d, step: %d, ave loss: %f, "
                        "ave acc: %f, speed: %f steps/s" %
                        (current_epoch, current_example * dev_count, num_train_examples,
                         steps, np.mean(total_loss), tmp_acc,
                         args.skip_steps / used_time))
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    # Periodic checkpoint: weights + config + vocab so the
                    # saved directory is directly loadable as a model_path.
                    save_path = os.path.join(args.save_model_path,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(self.exe, save_path, train_program)
                    config_save_path = os.path.join(args.save_model_path, "config.json")
                    json.dump(self.config_dict, open(config_save_path, "w"))
                    shutil.copy(args.ernie_config_path, args.save_model_path)
                    shutil.copy(args.vocab_path, args.save_model_path)
                if last_epoch != current_epoch:
                    last_epoch = current_epoch
            except fluid.core.EOFException:
                # Data exhausted: save a final checkpoint and stop.
                save_path = os.path.join(args.save_model_path, "step_" + str(steps))
                fluid.io.save_persistables(self.exe, save_path, train_program)
                config_save_path = os.path.join(args.save_model_path, "config.json")
                json.dump(self.config_dict, open(config_save_path, "w"))
                shutil.copy(args.ernie_config_path, args.save_model_path)
                shutil.copy(args.vocab_path, args.save_model_path)
                train_pyreader.reset()
                break
| 13,669 | 38.623188 | 114 | py |
RocketQA | RocketQA-main/rocketqa/reader/reader_ce_predict.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
from rocketqa.utils import tokenization
from rocketqa.utils.batching import pad_batch_data
log = logging.getLogger(__name__)
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Yield delimiter-split rows from *fd*, sharded across trainers.

    Only every ``trainer_num``-th line (starting at offset ``trainer_id``)
    is produced. A single-column row is wrapped in a 1-tuple so callers can
    unpack rows uniformly.
    """
    def generate_rows():
        for line_no, raw_line in enumerate(fd):
            if line_no % trainer_num != trainer_id:
                continue
            fields = raw_line.rstrip('\n').split(delimiter)
            yield (fields,) if len(fields) == 1 else fields
    return generate_rows()
class BaseReader(object):
    """Shared tokenize/batch/pad pipeline for cross-encoder prediction.

    Subclasses supply `_pad_batch_records` and may override reading/
    generation (see CEPredictorReader).
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        self.max_seq_len = max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # in_tokens: interpret batch_size as a token budget, not a row count.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, quotechar=None):
        """Reads a tab separated value file; first row is the header."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Segment A is the query; segment B is title tokens followed by
        paragraph tokens. Reserves 3 positions for [CLS]/[SEP]/[SEP].
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_a = tokenizer.tokenize(query)
        tokens_b = None
        title = tokenization.convert_to_unicode(example.title)
        tokens_b = tokenizer.tokenize(title)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        tokens_b.extend(tokens_para)
        self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        # The convention in BERT/ERNIE is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        text_type_ids = []
        tokens.append("[CLS]")
        text_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            text_type_ids.append(0)
        tokens.append("[SEP]")
        text_type_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                text_type_ids.append(1)
            tokens.append("[SEP]")
            text_type_ids.append(1)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        position_ids = list(range(len(token_ids)))
        if self.is_inference:
            Record = namedtuple('Record',
                                ['token_ids', 'text_type_ids', 'position_ids'])
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record', [
                'token_ids', 'text_type_ids', 'position_ids', 'label_id', 'qid'
            ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.max_seq_len,
                                                     self.tokenizer)
            max_len = max(max_len, len(record.token_ids))
            if self.in_tokens:
                # Token-budget batching: padded size must fit batch_size tokens.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                batch_records, max_len = [record], len(record.token_ids)
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        # Returns the cached count; never reads input_file in this variant.
        return self.num_examples
    def data_generator(self,
                       input_file,
                       batch_size,
                       shuffle=True,
                       phase=None):
        """Return a generator-factory over padded batches.

        NOTE(review): in this (predict-side) copy the names `trainer_num`,
        `trainer_id`, `epoch` and `dev_count` are never defined, and the
        base `_read_tsv` does not accept the keyword arguments passed here —
        calling this method would raise NameError/TypeError. The subclass
        CEPredictorReader overrides data_generator, so this path appears
        unused; confirm before relying on it.
        """
        if phase == 'train':
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            examples = self._read_tsv(input_file, trainer_id=trainer_id, \
                trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Surface generator errors as a printed traceback instead of
            # letting them propagate into the pyreader thread.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class CEPredictorReader(BaseReader):
    """Reader for cross-encoder prediction over in-memory samples.

    Unlike BaseReader, samples arrive as a list of already-formatted
    'query\\ttitle\\tpara' strings rather than a TSV file path.
    """
    def data_generator(self,
                       batch_samples,
                       batch_size,
                       dev_count=1,
                       shuffle=True,
                       phase=None):
        """Return a generator-factory over padded batches of batch_samples.

        Note: `shuffle` is accepted but never used in this variant.
        """
        examples = self._read_samples(batch_samples)
        def wrapper():
            all_dev_batches = []
            for batch_data in self._prepare_batch_data(
                    examples, batch_size, phase=phase):
                if len(all_dev_batches) < dev_count:
                    all_dev_batches.append(batch_data)
                if len(all_dev_batches) == dev_count:
                    for batch in all_dev_batches:
                        yield batch
                    all_dev_batches = []
        def f():
            # Surface generator errors as a printed traceback instead of
            # letting them propagate into the pyreader thread.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
    def _read_samples(self, batch_samples):
        """Parse 'query\\ttitle\\tpara' strings into Example tuples.

        A dummy label '0' is appended so the 4-field Example layout matches
        the training reader's (query, title, para, label).
        """
        headers = 'query\ttitle\tpara\tlabel'.split('\t')
        text_indices = [
            index for index, h in enumerate(headers) if h != "label"
        ]
        Example = namedtuple('Example', headers)
        examples = []
        for cnt, line in enumerate(batch_samples):
            line = line.rstrip('\n').split('\t')
            for index, text in enumerate(line):
                if index in text_indices:
                    if self.for_cn:
                        # Chinese text: spaces carry no information, drop them.
                        line[index] = text.replace(' ', '')
                    else:
                        line[index] = text
            line.append('0')
            example = Example(*line)
            examples.append(example)
        return examples
    def _pad_batch_records(self, batch_records):
        """Pad a list of Records into model-ready numpy arrays.

        Returns [token_ids, text_type_ids, position_ids, task_ids,
        input_mask] plus [labels, qids] when not in inference mode.
        """
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                # No qids supplied: emit an empty placeholder array.
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids, pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids, pad_idx=self.pad_id)
        padded_task_ids = np.ones_like(
            padded_token_ids, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids, padded_text_type_ids, padded_position_ids,
            padded_task_ids, input_mask
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 13,177 | 35.913165 | 103 | py |
RocketQA | RocketQA-main/rocketqa/reader/reader_ce_train.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
from rocketqa.utils import tokenization
from rocketqa.utils.batching import pad_batch_data
log = logging.getLogger(__name__)
def csv_reader(fd, delimiter='\t', trainer_id=0, trainer_num=1):
    """Generator over delimiter-separated rows of *fd* for one trainer.

    Lines are dealt round-robin across ``trainer_num`` trainers; this
    trainer keeps those whose index is congruent to ``trainer_id``.
    One-field rows come back wrapped in a 1-tuple.
    """
    def shard():
        for idx, text in enumerate(fd):
            if idx % trainer_num == trainer_id:
                cols = text.rstrip('\n').split(delimiter)
                if len(cols) == 1:
                    yield (cols,)
                else:
                    yield cols
    return shard()
class BaseReader(object):
    """Shared tokenize/batch/pad pipeline for cross-encoder training.

    Subclasses (CETrainReader) supply sharded `_read_tsv` and
    `_pad_batch_records`.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        self.max_seq_len = max_seq_len
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # in_tokens: interpret batch_size as a token budget, not a row count.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, quotechar=None):
        """Reads a tab separated value file; first row is the header."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Segment A is the query; segment B is title tokens followed by
        paragraph tokens. Reserves 3 positions for [CLS]/[SEP]/[SEP].
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_a = tokenizer.tokenize(query)
        tokens_b = None
        title = tokenization.convert_to_unicode(example.title)
        tokens_b = tokenizer.tokenize(title)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        tokens_b.extend(tokens_para)
        self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        # The convention in BERT/ERNIE is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        text_type_ids = []
        tokens.append("[CLS]")
        text_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            text_type_ids.append(0)
        tokens.append("[SEP]")
        text_type_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                text_type_ids.append(1)
            tokens.append("[SEP]")
            text_type_ids.append(1)
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        position_ids = list(range(len(token_ids)))
        if self.is_inference:
            Record = namedtuple('Record',
                                ['token_ids', 'text_type_ids', 'position_ids'])
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record', [
                'token_ids', 'text_type_ids', 'position_ids', 'label_id', 'qid'
            ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids=token_ids,
                text_type_ids=text_type_ids,
                position_ids=position_ids,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.max_seq_len,
                                                     self.tokenizer)
            max_len = max(max_len, len(record.token_ids))
            if self.in_tokens:
                # Token-budget batching: padded size must fit batch_size tokens.
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                batch_records, max_len = [record], len(record.token_ids)
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        # Counts by fully reading the file (unlike the predict-side reader,
        # which returns a cached value).
        examples = self._read_tsv(input_file)
        return len(examples)
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       trainer_id=0,
                       trainer_num=1,
                       shuffle=True,
                       phase=None):
        """Return a generator-factory over padded batches for `epoch` passes.

        NOTE(review): the trainer_id/trainer_num/num_examples kwargs passed
        to `_read_tsv` below are only accepted by CETrainReader's override,
        not by the base `_read_tsv` — this base class is not usable
        stand-alone in the train phase. Confirm intended.
        """
        if phase == 'train':
            # examples = examples[trainer_id: (len(examples) //trainer_num) * trainer_num : trainer_num]
            self.num_examples_per_node = self.total_num // trainer_num
            self.num_examples = self.num_examples_per_node * trainer_num
            examples = self._read_tsv(input_file, trainer_id=trainer_id, trainer_num=trainer_num, num_examples=self.num_examples_per_node)
            log.info('apply sharding %d/%d' % (trainer_id, trainer_num))
        else:
            examples = self._read_tsv(input_file)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Surface generator errors as a printed traceback instead of
            # letting them propagate into the pyreader thread.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class CETrainReader(BaseReader):
    """Reader for cross-encoder training data (query, title, para, label rows)."""
    def _read_tsv(self, input_file, quotechar=None, trainer_id=0, trainer_num=1, num_examples=0):
        """Reads a tab separated value file.

        The file has no header row; columns are fixed as
        query/title/para/label.  `csv_reader` is expected to apply the
        trainer_id/trainer_num sharding; at most `num_examples` rows are
        kept when `num_examples` is non-zero.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f, trainer_id=trainer_id, trainer_num=trainer_num)
            headers = 'query\ttitle\tpara\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                if num_examples != 0 and cnt == num_examples:
                    break
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: drop intra-token spaces.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad a list of records into fixed-shape arrays for one batch."""
        batch_token_ids = [record.token_ids for record in batch_records]
        batch_text_type_ids = [record.text_type_ids for record in batch_records]
        batch_position_ids = [record.position_ids for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): truthiness check — a qid of None (and "0") yields
            # an empty qid array for the whole batch.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids, input_mask = pad_batch_data(
            batch_token_ids, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids = pad_batch_data(
            batch_text_type_ids, pad_idx=self.pad_id)
        padded_position_ids = pad_batch_data(
            batch_position_ids, pad_idx=self.pad_id)
        # Constant task id broadcast to the padded token shape.
        padded_task_ids = np.ones_like(
            padded_token_ids, dtype="int64") * self.task_id
        return_list = [
            padded_token_ids, padded_text_type_ids, padded_position_ids,
            padded_task_ids, input_mask
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 12,784 | 37.050595 | 138 | py |
RocketQA | RocketQA-main/rocketqa/reader/reader_de_train.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
from rocketqa.utils import tokenization
from rocketqa.utils.batching import pad_batch_data
log = logging.getLogger(__name__)
def csv_reader(fd, delimiter='\t'):
    """Return a generator over `fd` yielding each line split on `delimiter`.

    Trailing newlines are stripped before splitting; the input is consumed
    lazily.
    """
    def _rows():
        for raw_line in fd:
            yield raw_line.rstrip('\n').split(delimiter)
    return _rows()
class BaseReader(object):
    """Base reader for dual-encoder training data.

    Tokenizes (query, positive title/para, negative title/para) rows and
    packs them into padded id batches for the trainer.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 total_num=0,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        # Separate length budgets for the query tower and the passage tower.
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        # NOTE(review): the `tokenizer` argument is ignored — FullTokenizer
        # is always used.
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        # When True, batch_size is counted in tokens rather than examples.
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        # When True, spaces are stripped from text fields (Chinese corpora).
        self.for_cn = for_cn
        self.task_id = task_id
        self.current_example = 0
        self.current_epoch = 0
        self.total_num = total_num
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file whose first row names the columns."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Produces three id sequences: the query ([CLS] q [SEP]) and one
        positive and one negative passage ([CLS] title [SEP] para [SEP],
        with segment 0 for the title and 1 for the para).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Reserve 2 slots for [CLS] and [SEP] around the query.
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        # pos title
        title_pos = tokenization.convert_to_unicode(example.title_pos)
        tokens_title_pos = tokenizer.tokenize(title_pos)
        # pos para
        para_pos = tokenization.convert_to_unicode(example.para_pos)
        tokens_para_pos = tokenizer.tokenize(para_pos)
        # Reserve 3 slots for [CLS] title [SEP] para [SEP].
        self._truncate_seq_pair(tokens_title_pos, tokens_para_pos, p_max_seq_length - 3)
        # neg title
        title_neg = tokenization.convert_to_unicode(example.title_neg)
        tokens_title_neg = tokenizer.tokenize(title_neg)
        # neg para
        para_neg = tokenization.convert_to_unicode(example.para_neg)
        tokens_para_neg = tokenizer.tokenize(para_neg)
        self._truncate_seq_pair(tokens_title_neg, tokens_para_neg, p_max_seq_length - 3)
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        ### pos_para
        tokens_p_pos = []
        text_type_ids_p_pos = []
        tokens_p_pos.append("[CLS]")
        text_type_ids_p_pos.append(0)
        for token in tokens_title_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(0)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(0)
        for token in tokens_para_pos:
            tokens_p_pos.append(token)
            text_type_ids_p_pos.append(1)
        tokens_p_pos.append("[SEP]")
        text_type_ids_p_pos.append(1)
        token_ids_p_pos = tokenizer.convert_tokens_to_ids(tokens_p_pos)
        position_ids_p_pos = list(range(len(token_ids_p_pos)))
        ### neg_para
        tokens_p_neg = []
        text_type_ids_p_neg = []
        tokens_p_neg.append("[CLS]")
        text_type_ids_p_neg.append(0)
        for token in tokens_title_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(0)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(0)
        for token in tokens_para_neg:
            tokens_p_neg.append(token)
            text_type_ids_p_neg.append(1)
        tokens_p_neg.append("[SEP]")
        text_type_ids_p_neg.append(1)
        token_ids_p_neg = tokenizer.convert_tokens_to_ids(tokens_p_neg)
        position_ids_p_neg = list(range(len(token_ids_p_neg)))
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p_pos', 'text_type_ids_p_pos', 'position_ids_p_pos', \
                 'token_ids_p_neg', 'text_type_ids_p_neg', 'position_ids_p_neg',
                 'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p_pos=token_ids_p_pos,
                text_type_ids_p_pos=text_type_ids_p_pos,
                position_ids_p_pos=position_ids_p_pos,
                token_ids_p_neg=token_ids_p_neg,
                text_type_ids_p_neg=text_type_ids_p_neg,
                position_ids_p_neg=position_ids_p_neg,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None):
        """generate batch records"""
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                     self.p_max_seq_len, self.tokenizer)
            # Track the longest passage (pos or neg) for the token budget.
            max_len = max(max_len, len(record.token_ids_p_pos))
            max_len = max(max_len, len(record.token_ids_p_neg))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                # Restart the batch with the overflowing record.
                max_len = max(len(record.token_ids_p_neg), len(record.token_ids_p_pos))
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Return the number of examples in `input_file`."""
        return len(self._read_tsv(input_file))
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None):
        """Return a zero-argument generator factory over padded batches.

        Batches are buffered in groups of `dev_count` (one per device);
        NOTE(review): a final group smaller than `dev_count` is dropped.
        """
        examples = self._read_tsv(input_file, batch_size=batch_size)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Log (but swallow) any exception raised during iteration.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class DETrainReader(BaseReader):
    """Reader for dual-encoder training rows (query, pos pair, neg pair, label)."""
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file.

        The file has no header row; columns are fixed.  The example list is
        padded (by repeating the last row) to a multiple of `batch_size` so
        every batch is full.
        """
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = 'query\ttitle_pos\tpara_pos\ttitle_neg\tpara_neg\tlabel'.split('\t')
            text_indices = [
                index for index, h in enumerate(headers) if h != "label"
            ]
            Example = namedtuple('Example', headers)
            examples = []
            for cnt, line in enumerate(reader):
                for index, text in enumerate(line):
                    if index in text_indices:
                        if self.for_cn:
                            # Chinese text: drop intra-token spaces.
                            line[index] = text.replace(' ', '')
                        else:
                            line[index] = text
                example = Example(*line)
                examples.append(example)
            # Repeat the last example until the count divides batch_size.
            while len(examples) % batch_size != 0:
                examples.append(example)
            return examples
    def _pad_batch_records(self, batch_records):
        """Pad query / pos-passage / neg-passage ids into one batch of arrays."""
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p_pos = [record.token_ids_p_pos for record in batch_records]
        batch_text_type_ids_p_pos = [record.text_type_ids_p_pos for record in batch_records]
        batch_position_ids_p_pos = [record.position_ids_p_pos for record in batch_records]
        batch_token_ids_p_neg = [record.token_ids_p_neg for record in batch_records]
        batch_text_type_ids_p_neg = [record.text_type_ids_p_neg for record in batch_records]
        batch_position_ids_p_neg = [record.position_ids_p_neg for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): truthiness check — qid None (or "0") produces an
            # empty qid array for the whole batch.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p_pos, input_mask_p_pos = pad_batch_data(
            batch_token_ids_p_pos, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_pos = pad_batch_data(
            batch_text_type_ids_p_pos, pad_idx=self.pad_id)
        padded_position_ids_p_pos = pad_batch_data(
            batch_position_ids_p_pos, pad_idx=self.pad_id)
        padded_task_ids_p_pos = np.ones_like(padded_token_ids_p_pos, dtype="int64") * self.task_id
        padded_token_ids_p_neg, input_mask_p_neg = pad_batch_data(
            batch_token_ids_p_neg, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p_neg = pad_batch_data(
            batch_text_type_ids_p_neg, pad_idx=self.pad_id)
        padded_position_ids_p_neg = pad_batch_data(
            batch_position_ids_p_neg, pad_idx=self.pad_id)
        padded_task_ids_p_neg = np.ones_like(padded_token_ids_p_neg, dtype="int64") * self.task_id
        # Order matters: query fields, then positive passage, then negative.
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p_pos, padded_text_type_ids_p_pos, padded_position_ids_p_pos, padded_task_ids_p_pos,
            input_mask_p_pos,
            padded_token_ids_p_neg, padded_text_type_ids_p_neg, padded_position_ids_p_neg, padded_task_ids_p_neg,
            input_mask_p_neg
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
if __name__ == '__main__':
    # Module is import-only; no standalone CLI behaviour.
    pass
| 15,595 | 38.887468 | 113 | py |
RocketQA | RocketQA-main/rocketqa/reader/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/rocketqa/reader/reader_de_predict.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
from rocketqa.utils import tokenization
from rocketqa.utils.batching import pad_batch_data
log = logging.getLogger(__name__)
def csv_reader(fd, delimiter='\t'):
    """Wrap iterable `fd` in a lazy generator of delimiter-split rows.

    Each line has its trailing newline removed before splitting.
    """
    def _split_lines():
        for raw in fd:
            yield raw.rstrip('\n').split(delimiter)
    return _split_lines()
class BaseReader(object):
    """Base reader for dual-encoder prediction data (query, title, para rows)."""
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 q_max_seq_len=128,
                 p_max_seq_len=512,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 for_cn=True,
                 task_id=0):
        # Separate length budgets for the query tower and the passage tower.
        self.q_max_seq_len = q_max_seq_len
        self.p_max_seq_len = p_max_seq_len
        # NOTE(review): the `tokenizer` argument is ignored — FullTokenizer
        # is always used.
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # Seeds numpy's global RNG (affects shuffling process-wide).
        np.random.seed(random_seed)
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _read_tsv(self, input_file, batch_size=16, quotechar=None):
        """Reads a tab separated value file whose first row names the columns."""
        with open(input_file, 'r', encoding='utf8') as f:
            reader = csv_reader(f)
            headers = next(reader)
            Example = namedtuple('Example', headers)
            examples = []
            for line in reader:
                example = Example(*line)
                examples.append(example)
            return examples
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _convert_example_to_record(self, example, q_max_seq_length, p_max_seq_length, tokenizer):
        """Converts a single `Example` into a single `Record`.

        Builds the query sequence ([CLS] q [SEP]) and the passage sequence
        ([CLS] title [SEP] para [SEP], segment 0 for title, 1 for para).
        """
        query = tokenization.convert_to_unicode(example.query)
        tokens_query = tokenizer.tokenize(query)
        # Reserve 2 slots for [CLS] and [SEP] around the query.
        self._truncate_seq_pair([], tokens_query, q_max_seq_length - 2)
        title = tokenization.convert_to_unicode(example.title)
        tokens_title = tokenizer.tokenize(title)
        para = tokenization.convert_to_unicode(example.para)
        tokens_para = tokenizer.tokenize(para)
        # Reserve 3 slots for [CLS] title [SEP] para [SEP].
        self._truncate_seq_pair(tokens_title, tokens_para, p_max_seq_length - 3)
        ### query
        tokens_q = []
        text_type_ids_q = []
        tokens_q.append("[CLS]")
        text_type_ids_q.append(0)
        for token in tokens_query:
            tokens_q.append(token)
            text_type_ids_q.append(0)
        tokens_q.append("[SEP]")
        text_type_ids_q.append(0)
        token_ids_q = tokenizer.convert_tokens_to_ids(tokens_q)
        position_ids_q = list(range(len(token_ids_q)))
        ### title-para
        tokens_p = []
        text_type_ids_p = []
        tokens_p.append("[CLS]")
        text_type_ids_p.append(0)
        for token in tokens_title:
            tokens_p.append(token)
            text_type_ids_p.append(0)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(0)
        for token in tokens_para:
            tokens_p.append(token)
            text_type_ids_p.append(1)
        tokens_p.append("[SEP]")
        text_type_ids_p.append(1)
        token_ids_p = tokenizer.convert_tokens_to_ids(tokens_p)
        position_ids_p = list(range(len(token_ids_p)))
        if self.is_inference:
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p'])
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p)
        else:
            if self.label_map:
                label_id = self.label_map[example.label]
            else:
                label_id = example.label
            Record = namedtuple('Record',
                ['token_ids_q', 'text_type_ids_q', 'position_ids_q', \
                 'token_ids_p', 'text_type_ids_p', 'position_ids_p', \
                 'label_id', 'qid'
                ])
            qid = None
            if "qid" in example._fields:
                qid = example.qid
            record = Record(
                token_ids_q=token_ids_q,
                text_type_ids_q=text_type_ids_q,
                position_ids_q=position_ids_q,
                token_ids_p=token_ids_p,
                text_type_ids_p=text_type_ids_p,
                position_ids_p=position_ids_p,
                label_id=label_id,
                qid=qid)
        return record
    def _prepare_batch_data(self, examples, batch_size, phase=None, read_id=False):
        """generate batch records

        When `read_id` is True, `_convert_example_id_to_record` is used
        instead.  NOTE(review): that method is not defined on this base
        class — presumably a subclass provides it; confirm before use.
        """
        batch_records, max_len = [], 0
        for index, example in enumerate(examples):
            if phase == "train":
                self.current_example = index
            if read_id is False:
                record = self._convert_example_to_record(example, self.q_max_seq_len,
                                                         self.p_max_seq_len, self.tokenizer)
            else:
                record = self._convert_example_id_to_record(example, self.q_max_seq_len,
                                                            self.p_max_seq_len, self.tokenizer)
            max_len = max(max_len, len(record.token_ids_p))
            if self.in_tokens:
                to_append = (len(batch_records) + 1) * max_len <= batch_size
            else:
                to_append = len(batch_records) < batch_size
            if to_append:
                batch_records.append(record)
            else:
                yield self._pad_batch_records(batch_records)
                # Restart the batch with the overflowing record.
                max_len = len(record.token_ids_p)
                batch_records = [record]
        if batch_records:
            yield self._pad_batch_records(batch_records)
    def get_num_examples(self, input_file):
        """Return the number of examples in `input_file`."""
        examples = self._read_tsv(input_file)
        return len(examples)
    def data_generator(self,
                       input_file,
                       batch_size,
                       epoch,
                       dev_count=1,
                       shuffle=True,
                       phase=None,
                       read_id=False):
        """Return a zero-argument generator factory over padded batches.

        NOTE(review): a final group smaller than `dev_count` is dropped.
        """
        examples = self._read_tsv(input_file, batch_size)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if phase == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, phase=phase, read_id=read_id):
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # Log (but swallow) any exception raised during iteration.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
class DEPredictorReader(BaseReader):
    """Reader that feeds in-memory query/title/para samples to the dual encoder."""
    def _read_samples(self, batch_samples, quotechar=None):
        """Parse raw tab-separated strings into Example namedtuples.

        Each sample must contain at least three tab-separated fields
        (query, title, para); a dummy label '0' is appended.
        """
        headers = 'query\ttitle\tpara\tlabel'.split('\t')
        Example = namedtuple('Example', headers)
        examples = []
        for data in batch_samples:
            line = []
            sample = data.strip().split('\t')
            for i in range(3):
                if self.for_cn:
                    # Chinese text: drop intra-token spaces.
                    line.append(sample[i].replace(' ', ''))
                else:
                    line.append(sample[i])
            line.append('0')
            example = Example(*line)
            examples.append(example)
        return examples
    def data_generator(self,
                       samples,
                       batch_size=32,
                       dev_count=1,
                       shuffle=False,
                       phase=None,
                       read_id=False):
        """Return a zero-argument generator factory over padded batches.

        Unlike the base class this takes in-memory samples (not a file)
        and runs a single pass (no epochs, no shuffling by default).
        NOTE(review): a final group smaller than `dev_count` is dropped.
        """
        examples = self._read_samples(samples)
        def wrapper():
            all_dev_batches = []
            for batch_data in self._prepare_batch_data(
                    examples, batch_size, phase=phase, read_id=read_id):
                if len(all_dev_batches) < dev_count:
                    all_dev_batches.append(batch_data)
                if len(all_dev_batches) == dev_count:
                    for batch in all_dev_batches:
                        yield batch
                    all_dev_batches = []
        def f():
            # Log (but swallow) any exception raised during iteration.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
    def _pad_batch_records(self, batch_records):
        """Pad query and passage id lists into fixed-shape arrays for one batch."""
        batch_token_ids_q = [record.token_ids_q for record in batch_records]
        batch_text_type_ids_q = [record.text_type_ids_q for record in batch_records]
        batch_position_ids_q = [record.position_ids_q for record in batch_records]
        batch_token_ids_p = [record.token_ids_p for record in batch_records]
        batch_text_type_ids_p = [record.text_type_ids_p for record in batch_records]
        batch_position_ids_p = [record.position_ids_p for record in batch_records]
        if not self.is_inference:
            batch_labels = [record.label_id for record in batch_records]
            batch_labels = np.array(batch_labels).astype("int64").reshape(
                [-1, 1])
            # NOTE(review): truthiness check — qid None (or "0") produces an
            # empty qid array for the whole batch.
            if batch_records[0].qid:
                batch_qids = [record.qid for record in batch_records]
                batch_qids = np.array(batch_qids).astype("int64").reshape(
                    [-1, 1])
            else:
                batch_qids = np.array([]).astype("int64").reshape([-1, 1])
        # padding
        padded_token_ids_q, input_mask_q = pad_batch_data(
            batch_token_ids_q, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_q = pad_batch_data(
            batch_text_type_ids_q, pad_idx=self.pad_id)
        padded_position_ids_q = pad_batch_data(
            batch_position_ids_q, pad_idx=self.pad_id)
        padded_task_ids_q = np.ones_like(padded_token_ids_q, dtype="int64") * self.task_id
        padded_token_ids_p, input_mask_p = pad_batch_data(
            batch_token_ids_p, pad_idx=self.pad_id, return_input_mask=True)
        padded_text_type_ids_p = pad_batch_data(
            batch_text_type_ids_p, pad_idx=self.pad_id)
        padded_position_ids_p = pad_batch_data(
            batch_position_ids_p, pad_idx=self.pad_id)
        padded_task_ids_p = np.ones_like(padded_token_ids_p, dtype="int64") * self.task_id
        # Order matters: query fields first, then passage fields.
        return_list = [
            padded_token_ids_q, padded_text_type_ids_q, padded_position_ids_q, padded_task_ids_q,
            input_mask_q,
            padded_token_ids_p, padded_text_type_ids_p, padded_position_ids_p, padded_task_ids_p,
            input_mask_p,
        ]
        if not self.is_inference:
            return_list += [batch_labels, batch_qids]
        return return_list
| 13,619 | 36.01087 | 97 | py |
RocketQA | RocketQA-main/rocketqa/utils/optimization.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization and learning rate scheduling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """ Applies linear warmup of learning rate from 0 and decay to 0."""
    # Builds the schedule as graph ops inside the default main program:
    # lr rises linearly to `learning_rate` over `warmup_steps`, then decays
    # linearly (polynomial power=1) to 0 at `num_train_steps`.
    with fluid.default_main_program()._lr_schedule_guard():
        # Persistent scalar variable the trainer reads each step.
        lr = fluid.layers.tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="scheduled_learning_rate")
        global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
        with fluid.layers.control_flow.Switch() as switch:
            with switch.case(global_step < warmup_steps):
                # Warmup phase: linear ramp from 0 to learning_rate.
                warmup_lr = learning_rate * (global_step / warmup_steps)
                fluid.layers.tensor.assign(warmup_lr, lr)
            with switch.default():
                # Decay phase: linear decay to 0 (power=1, no cycling).
                decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
                    learning_rate=learning_rate,
                    decay_steps=num_train_steps,
                    end_learning_rate=0.0,
                    power=1.0,
                    cycle=False)
                fluid.layers.tensor.assign(decayed_lr, lr)
        return lr
def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 use_dynamic_loss_scaling=False,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 incr_ratio=2.0,
                 decr_ratio=0.8,
                 dist_strategy=None,
                 use_lamb=False):
    """Attach an Adam/LAMB optimizer with LR schedule and decoupled weight decay.

    Returns the scheduled learning-rate variable.  NOTE(review): the
    loss-scaling parameters (use_dynamic_loss_scaling, incr_every_n_steps,
    decr_every_n_nan_or_inf, incr_ratio, decr_ratio), `startup_prog` and
    `dist_strategy` are accepted but never used in this body.
    """
    if warmup_steps > 0:
        # Warmup requested: pick one of the two supported schedulers.
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler\
                .noam_decay(1/(warmup_steps *(learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        else:
            raise ValueError("Unkown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay'")
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    else:
        # No warmup: constant learning rate held in a persistent variable.
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)
        if use_lamb:
            optimizer = fluid.optimizer.LambOptimizer(learning_rate=scheduled_lr)
        else:
            optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
        # Register the constant LR so the optimizer reports it for this program.
        optimizer._learning_rate_map[fluid.default_main_program(
        )] = scheduled_lr
    # Global gradient-norm clipping at 1.0.
    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))
    def exclude_from_weight_decay(name):
        # Skip decay for layer-norm parameters and bias-like parameters.
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False
    # Snapshot pre-update parameter values (stop_gradient so the copies
    # do not participate in backprop); used below for decoupled decay.
    param_list = dict()
    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True
    # if dist_strategy is not None:
        # use fleet api
        # optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
    _, param_grads = optimizer.minimize(loss)
    if weight_decay > 0:
        # Decoupled weight decay: subtract decay from the updated params
        # using the pre-update snapshot, scaled by the scheduled LR.
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr
                fluid.layers.assign(output=param, input=updated_param)
    return scheduled_lr
| 5,188 | 37.723881 | 84 | py |
RocketQA | RocketQA-main/rocketqa/utils/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Coerce `text` to a unicode string, decoding UTF-8 bytes if needed."""
    if six.PY3:
        # Python 3: text strings pass through, byte strings are decoded.
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        # Python 2: byte strings are decoded, unicode passes through.
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # Both interpreter generations want the native `str` type here: a
    # unicode string on Python 3, a byte string on Python 2.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode("utf-8")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict mapping token -> int id.

    Each line is "token" or "token\tid"; when no id column is present the
    line number is used.  Reading stops at the first line with more than
    two tab-separated fields.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding='utf8') as fin:
        for line_no, raw in enumerate(fin):
            pieces = convert_to_unicode(raw.strip()).split("\t")
            if len(pieces) > 2:
                break
            tok = pieces[0].strip()
            idx = pieces[1] if len(pieces) == 2 else line_no
            vocab[tok] = int(idx)
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab.

    Raises KeyError if any item is missing from `vocab`.
    """
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Map each token in `tokens` to its id via the `vocab` mapping."""
    return [vocab[token] for token in tokens]
def convert_ids_to_tokens(inv_vocab, ids):
    """Map each id in `ids` back to its token via the `inv_vocab` mapping."""
    return [inv_vocab[i] for i in ids]
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses any run of whitespace.
    return stripped.split() if stripped else []
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic (punctuation/case) then wordpiece."""
    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token for decoding.
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    def tokenize(self, text):
        """Split `text` into basic tokens, then each into wordpiece sub-tokens."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def convert_tokens_to_ids(self, tokens):
        """Map tokens to vocabulary ids."""
        return convert_by_vocab(self.vocab, tokens)
    def convert_ids_to_tokens(self, ids):
        """Map vocabulary ids back to tokens."""
        return convert_by_vocab(self.inv_vocab, ids)
class CharTokenizer(object):
    """Runs end-to-end tokenization: whitespace split then WordPiece.

    Unlike FullTokenizer, no BasicTokenizer pass is applied; the input is
    split on single spaces only.
    """

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        # BUG FIX: previously this flag was accepted but ignored and the
        # input was always lower-cased. The default (True) keeps the old
        # behavior; passing False now actually preserves case.
        self.do_lower_case = do_lower_case
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        if self.do_lower_case:
            text = text.lower()
        split_tokens = []
        for token in text.split(" "):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text into a list of basic tokens."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                # Accents are stripped only on the lower-cased (uncased) path.
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (Unicode category "Mn"), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text.

        Each punctuation character becomes its own token; runs of other
        characters stay together.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement char, and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        # vocab: token -> id mapping; membership tests drive the matching.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to unk_token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily find the longest vocab entry starting at `start`;
                # non-initial pieces carry the "##" continuation prefix.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No vocab piece matched at this position: emit unk_token
                    # for the whole word.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def tokenize_chinese_chars(text):
    """Adds whitespace around any CJK character.

    Returns a list of segments: each CJK or whitespace character is its own
    segment, and maximal runs of all other characters are kept together.
    """

    def _is_chinese_char(cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # Anything in the CJK Unified Ideographs blocks (and extensions):
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # Hangul, Hiragana and Katakana are NOT included: those scripts are
        # written with space-separated words and need no special handling.
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _is_whitespace(c):
        return c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F

    segments = []
    pending = ""
    for ch in text:
        if _is_chinese_char(ord(ch)) or _is_whitespace(ch):
            # Flush any buffered run, then emit the CJK/space char alone.
            if pending:
                segments.append(pending)
                pending = ""
            segments.append(ch)
        else:
            pending += ch
    if pending:
        segments.append(pending)
    return segments
| 14,348 | 32.921986 | 84 | py |
RocketQA | RocketQA-main/rocketqa/utils/args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import six
import os
import sys
import argparse
import logging
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def prepare_logger(logger, debug=False, save_to_file=None):
    """Attach a console handler (and optional file handler) to `logger`.

    Sets the level to DEBUG and disables propagation to ancestor loggers.
    The `debug` flag is accepted for interface compatibility but unused.
    """
    fmt = logging.Formatter(
        fmt='[%(levelname)s] %(asctime)s [%(filename)12s:%(lineno)5d]:\t%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    if save_to_file is not None:
        file_handler = logging.FileHandler(save_to_file)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
def str2bool(v):
    """Parse a boolean from a string, for use as an argparse `type`.

    argparse does not parse "True"/"False" as Python booleans directly, so
    any case-insensitive "true", "t" or "1" maps to True, everything else
    to False.
    """
    return v.lower() in {"true", "t", "1"}
class ArgumentGroup(object):
    """Thin convenience wrapper around an argparse argument group."""

    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)

    def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
        """Register one argument.

        Non-positional names get a "--" prefix; bool arguments are parsed
        with str2bool so "true"/"False" strings work on the command line.
        The default value is appended to the help text.
        """
        prefix = "" if positional_arg else "--"
        if type == bool:
            type = str2bool
        self._group.add_argument(
            prefix + name,
            default=default,
            type=type,
            help=help + ' Default: %(default)s.',
            **kwargs)
def print_arguments(args, logger):
    """Log every parsed argument (sorted by name) between banner lines."""
    logger.info('----------- Configuration Arguments -----------')
    for name in sorted(vars(args)):
        logger.info('%s: %s' % (name, getattr(args, name)))
    logger.info('------------------------------------------------')
def check_cuda(use_cuda, err = \
    "\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\n \
    Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_cuda = False to run models on CPU.\n"
    ):
    """Exit with an error when use_cuda is requested on a CPU-only Paddle build."""
    try:
        if use_cuda == True and fluid.is_compiled_with_cuda() == False:
            log.error(err)
            sys.exit(1)
    except Exception as e:
        # NOTE(review): any failure while probing the Paddle build (including
        # the SystemExit-unrelated errors) is silently ignored, so this check
        # is best-effort by design — confirm that is intended.
        pass
| 3,047 | 34.858824 | 119 | py |
RocketQA | RocketQA-main/rocketqa/utils/finetune_args.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import argparse
from rocketqa.utils.args import ArgumentGroup
# yapf: disable
# Command-line schema for fine-tuning. Arguments are grouped by concern;
# this group covers model configuration and checkpoint paths.
parser = argparse.ArgumentParser(__doc__)
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for ernie model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")
model_g.add_arg("is_classify", bool, True, "is_classify")
model_g.add_arg("is_regression", bool, False, "is_regression")
model_g.add_arg("task_id", int, 0, "task id")
# Training hyper-parameters: schedule, optimizer options and loss scaling
# for mixed-precision training.
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("warmup_proportion", float, 0.1,
                "Proportion of training steps to perform linear learning rate warmup for.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
train_g.add_arg("use_recompute", bool, False, "Whether to use recompute optimizer for training.")
train_g.add_arg("use_mix_precision", bool, False, "Whether to use mix-precision optimizer for training.")
train_g.add_arg("use_cross_batch", bool, False, "Whether to use cross-batch for training.")
train_g.add_arg("use_lamb", bool, False, "Whether to use LambOptimizer for training.")
train_g.add_arg("use_dynamic_loss_scaling", bool, True, "Whether to use dynamic loss scaling.")
train_g.add_arg("init_loss_scaling", float, 102400,
                "Loss scaling factor for mixed precision training, only valid when use_fp16 is enabled.")
train_g.add_arg("test_save", str, "./checkpoints/test_result", "test_save")
train_g.add_arg("log_folder", str, "./log/", "log_folder")
train_g.add_arg("metric", str, "simple_accuracy", "metric")
train_g.add_arg("incr_every_n_steps", int, 100, "Increases loss scaling every n consecutive.")
train_g.add_arg("decr_every_n_nan_or_inf", int, 2,
                "Decreases loss scaling every n accumulated steps with nan or inf gradients.")
train_g.add_arg("incr_ratio", float, 2.0,
                "The multiplier to use when increasing the loss scaling.")
train_g.add_arg("decr_ratio", float, 0.8,
                "The less-than-one-multiplier to use when decreasing.")
# Logging frequency and verbosity.
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 100, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")
# Dataset paths, vocabulary, sequence-length limits and batching options.
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("tokenizer", str, "FullTokenizer",
               "ATTENTION: the INPUT must be splited by Word with blank while using SentencepieceTokenizer or WordsegTokenizer")
data_g.add_arg("train_set", str, None, "Path to training data.")
data_g.add_arg("test_set", str, None, "Path to test data.")
data_g.add_arg("dev_set", str, None, "Path to validation data.")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("max_seq_len", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("q_max_seq_len", int, 32, "Number of words of the longest seqence.")
data_g.add_arg("p_max_seq_len", int, 256, "Number of words of the longest seqence.")
data_g.add_arg("train_data_size", int, 0, "Number of training data's total examples. Set for distribute.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("predict_batch_size", int, None, "Total examples' number in batch for predict. see also --in_tokens.")
data_g.add_arg("in_tokens", bool, False,
               "If set, the batch size will be the maximum number of tokens in one batch. "
               "Otherwise, it will be the maximum number of examples in one batch.")
data_g.add_arg("do_lower_case", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, None, "Random seed.")
data_g.add_arg("label_map_config", str, None, "label_map_path.")
data_g.add_arg("num_labels", int, 2, "label number")
data_g.add_arg("diagnostic", str, None, "GLUE Diagnostic Dataset")
data_g.add_arg("diagnostic_save", str, None, "GLUE Diagnostic save f")
data_g.add_arg("max_query_length", int, 64, "Max query length.")
data_g.add_arg("max_answer_length", int, 100, "Max answer length.")
data_g.add_arg("doc_stride", int, 128,
               "When splitting up a long document into chunks, how much stride to take between chunks.")
data_g.add_arg("n_best_size", int, 20,
               "The total number of n-best predictions to generate in the nbest_predictions.json output file.")
data_g.add_arg("chunk_scheme", type=str, default="IOB", choices=["IO", "IOB", "IOE", "IOBES"], help="chunk scheme")
# Runtime/execution options: device selection, distribution and which
# phases (train/val/test) to run.
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("is_distributed", bool, False, "If set, then start distributed training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("num_iteration_per_drop_scope", int, 10, "Iteration intervals to drop scope.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("output_item", int, 3, "Test output format.")
run_type_g.add_arg("output_file_name", str, None, "Test output file name")
run_type_g.add_arg("test_data_cnt", int, 1110000 , "total cnt of testset")
run_type_g.add_arg("use_multi_gpu_test", bool, False, "Whether to perform evaluation using multiple gpu cards")
run_type_g.add_arg("metrics", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("for_cn", bool, False, "model train for cn or for other langs.")
parser.add_argument("--enable_ce", action='store_true', help="The flag indicating whether to run the task for continuous evaluation.")
| 8,590 | 68.845528 | 134 | py |
RocketQA | RocketQA-main/rocketqa/utils/init.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import six
import ast
import copy
import logging
import numpy as np
import paddle.fluid as fluid
log = logging.getLogger(__name__)
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Restore persistable variables of `main_program` from a checkpoint dir.

    Only variables that are persistable AND have a matching file under
    `init_checkpoint_path` are loaded; missing ones are printed and skipped.
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path

    def existed_persitables(var):
        # Predicate for fluid.io.load_vars: keep only persistable variables
        # whose serialized file exists in the checkpoint directory.
        if not fluid.io.is_persistable(var):
            return False
        if not os.path.exists(os.path.join(init_checkpoint_path, var.name)):
            print ("Var not exists: [%s]\t%s" % (var.name, os.path.join(init_checkpoint_path, var.name)))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return os.path.exists(os.path.join(init_checkpoint_path, var.name))

    fluid.io.load_vars(
        exe,
        init_checkpoint_path,
        main_program=main_program,
        predicate=existed_persitables)
    log.info("Load model from {}".format(init_checkpoint_path))
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program):
    """Load pre-trained parameter values into `main_program`.

    Unlike init_checkpoint, this only loads fluid Parameters (trainable
    weights), not all persistable variables such as optimizer state.
    Parameters without a matching file are printed and skipped.
    """
    assert os.path.exists(pretraining_params_path
                          ), "[%s] cann't be found." % pretraining_params_path

    def existed_params(var):
        # Predicate for fluid.io.load_vars: keep only Parameters whose
        # serialized file exists in the pre-training directory.
        if not isinstance(var, fluid.framework.Parameter):
            return False
        if not os.path.exists(os.path.join(pretraining_params_path, var.name)):
            print ("Var not exists: [%s]\t%s" % (var.name, os.path.join(pretraining_params_path, var.name)))
        #else:
        #    print ("Var exists: [%s]" % (var.name))
        return os.path.exists(os.path.join(pretraining_params_path, var.name))

    fluid.io.load_vars(
        exe,
        pretraining_params_path,
        main_program=main_program,
        predicate=existed_params)
    log.info("Load pretraining parameters from {}.".format(
        pretraining_params_path))
| 2,695 | 34.946667 | 108 | py |
RocketQA | RocketQA-main/rocketqa/utils/batching.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask, padding and batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False,
                   return_seq_lens=False):
    """
    Pad every instance to the longest length in the batch.

    Returns the padded id tensor of shape [batch, max_len, 1], optionally
    followed (in flag order) by: position ids, a float attention mask
    (1 for real tokens, 0 for padding), the max length, the total token
    count, and per-instance sequence lengths. A single requested output is
    returned bare rather than in a list.
    """
    outputs = []
    max_len = max(len(inst) for inst in insts)

    # Any token id can be used as padding: padded positions are masked out
    # by weights downstream and contribute nothing to gradients.
    padded = np.array(
        [list(inst) + [pad_idx] * (max_len - len(inst)) for inst in insts])
    outputs.append(padded.astype("int64").reshape([-1, max_len, 1]))

    if return_pos:
        positions = np.array(
            [list(range(len(inst))) + [pad_idx] * (max_len - len(inst))
             for inst in insts])
        outputs.append(positions.astype("int64").reshape([-1, max_len, 1]))

    if return_input_mask:
        # Used to avoid attention on padded positions.
        mask = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
        mask = np.expand_dims(mask, axis=-1)
        outputs.append(mask.astype("float32"))

    if return_max_len:
        outputs.append(max_len)

    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))

    if return_seq_lens:
        lens = np.array([len(inst) for inst in insts])
        outputs.append(lens.astype("int64").reshape([-1, 1]))

    return outputs if len(outputs) > 1 else outputs[0]
# No standalone behavior: this module only exposes helpers for import.
if __name__ == "__main__":
    pass
| 2,683 | 33.410256 | 78 | py |
RocketQA | RocketQA-main/rocketqa/utils/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/rocketqa/model/dual_encoder_predict.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from six.moves import xrange
import paddle.fluid as fluid
from rocketqa.model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_predict_model(args,
                         pyreader_name,
                         ernie_config,
                         is_prediction=False,
                         task_name="",
                         share_parameter=0):
    """Build the dual-encoder prediction graph.

    Encodes the query and the passage with two separate ERNIE towers and
    scores each pair by the inner product of the two [CLS] outputs.

    Args:
        args: namespace providing q_max_seq_len and p_max_seq_len.
        pyreader_name: name of the py_reader feeding the graph.
        ernie_config: parsed ERNIE model configuration.
        is_prediction: unused here; kept for interface parity with the
            training graph builder.
        task_name: unused here; kept for interface parity.
        share_parameter: when non-zero, the query tower reuses the passage
            tower's parameter namespace ('titlepara_') instead of its own
            ('query_'), i.e. a shared (siamese) encoder.

    Returns:
        (pyreader, graph_vars) where graph_vars holds the pair score
        "probs" and the raw query/passage representations "q_rep"/"p_rep".
    """
    # Reader feeds, in order: five query tensors, five passage tensors,
    # then labels and query ids.
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[-1, args.q_max_seq_len, 1], [-1, args.q_max_seq_len, 1],
                [-1, args.q_max_seq_len, 1], [-1, args.q_max_seq_len, 1],
                [-1, args.q_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1], [-1, args.p_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1], [-1, args.p_max_seq_len, 1],
                [-1, args.p_max_seq_len, 1],
                [-1, 1], [-1, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=pyreader_name,
        use_double_buffer=True)

    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p, sent_ids_p, pos_ids_p, task_ids_p, input_mask_p,
     labels, qids) = fluid.layers.read_file(pyreader)

    # Query tower: its own parameters, or shared with the passage tower.
    if share_parameter == 0:
        ernie_q = ErnieModel(
            src_ids=src_ids_q,
            position_ids=pos_ids_q,
            sentence_ids=sent_ids_q,
            task_ids=task_ids_q,
            input_mask=input_mask_q,
            config=ernie_config,
            model_name='query_')
    else:
        ernie_q = ErnieModel(
            src_ids=src_ids_q,
            position_ids=pos_ids_q,
            sentence_ids=sent_ids_q,
            task_ids=task_ids_q,
            input_mask=input_mask_q,
            config=ernie_config,
            model_name='titlepara_')

    ## pos para
    ernie_p = ErnieModel(
        src_ids=src_ids_p,
        position_ids=pos_ids_p,
        sentence_ids=sent_ids_p,
        task_ids=task_ids_p,
        input_mask=input_mask_p,
        config=ernie_config,
        model_name='titlepara_')

    q_cls_feats = ernie_q.get_cls_output()
    p_cls_feats = ernie_p.get_cls_output()

    # Relevance score = inner product of the two [CLS] representations
    # (elementwise multiply, then sum over the feature dimension).
    multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)
    probs = fluid.layers.reduce_sum(multi, dim=-1)

    graph_vars = {
        "probs": probs,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }
    return pyreader, graph_vars
| 3,319 | 31.54902 | 74 | py |
RocketQA | RocketQA-main/rocketqa/model/cross_encoder_predict.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from six.moves import xrange
import paddle.fluid as fluid
from rocketqa.model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_predict_model(args,
                         pyreader_name,
                         ernie_config,
                         is_prediction=False,
                         task_name="",
                         joint_training=0):
    """Build the cross-encoder (re-ranker) graph.

    Feeds a single concatenated query+passage sequence through one ERNIE
    encoder and produces a relevance score per example.

    Args:
        args: namespace providing max_seq_len and num_labels.
        pyreader_name: base name for the py_reader feeding the graph.
        ernie_config: parsed ERNIE model configuration.
        is_prediction: when False, an adversarial-perturbation path is
            built around the model (see NOTE in that branch).
        task_name: prefix for the reader name and fc parameter names.
        joint_training: when 1, use the "qtp_" parameter namespace and
            emit a single raw logit instead of a softmax over num_labels.

    Returns:
        (pyreader, graph_vars) where graph_vars["probs"] is the score.
    """
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],
        dtypes=[
            'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
        ],
        lod_levels=[0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)

    (src_ids, sent_ids, pos_ids, task_ids, input_mask, labels,
     qids) = fluid.layers.read_file(pyreader)

    def _model(is_noise=False):
        # Encoder + classification head. Parameter names depend on
        # joint_training so checkpoints from joint training can be loaded.
        if joint_training == 1:
            ernie = ErnieModel(
                src_ids=src_ids,
                position_ids=pos_ids,
                sentence_ids=sent_ids,
                task_ids=task_ids,
                input_mask=input_mask,
                config=ernie_config,
                is_noise=is_noise,
                model_name='qtp_')
            cls_feats = ernie.get_pooled_output(joint_training=1)
        else:
            ernie = ErnieModel(
                src_ids=src_ids,
                position_ids=pos_ids,
                sentence_ids=sent_ids,
                task_ids=task_ids,
                input_mask=input_mask,
                config=ernie_config,
                is_noise=is_noise)
            cls_feats = ernie.get_pooled_output()
        if not is_noise:
            # Dropout on the pooled feature only on the non-noise pass.
            cls_feats = fluid.layers.dropout(
                x=cls_feats,
                dropout_prob=0.1,
                dropout_implementation="upscale_in_train")
        if joint_training == 1:
            logits = fluid.layers.fc(
                input=cls_feats,
                size=1,
                param_attr=fluid.ParamAttr(
                    name="qtp__cls_out_w",
                    initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
                bias_attr=fluid.ParamAttr(
                    name="qtp__cls_out_b",
                    initializer=fluid.initializer.Constant(0.)))
            # The raw logit is used directly as the ranking score.
            probs = logits
        else:
            logits = fluid.layers.fc(
                input=cls_feats,
                size=args.num_labels,
                param_attr=fluid.ParamAttr(
                    name=task_name + "_cls_out_w",
                    initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
                bias_attr=fluid.ParamAttr(
                    name=task_name + "_cls_out_b",
                    initializer=fluid.initializer.Constant(0.)))
            probs = fluid.layers.softmax(logits)

        graph_vars = {
            "probs": probs,
        }
        return graph_vars

    if not is_prediction:
        # Adversarial path: perturb the word embedding table along the
        # normalized loss gradient, rebuild the graph on the perturbed
        # embeddings, then restore the table.
        graph_vars = _model(is_noise=True)
        # NOTE(review): _model() only returns {"probs": ...}; this "loss"
        # lookup would raise KeyError at graph-build time. A loss output was
        # presumably removed from _model — confirm before calling with
        # is_prediction=False.
        old_loss = graph_vars["loss"]
        token_emb = fluid.default_main_program().global_block().var("word_embedding")
        token_emb.stop_gradient = False
        token_gradient = fluid.gradients(old_loss, token_emb)[0]
        token_gradient.stop_gradient = False
        epsilon = 1e-8
        norm = (fluid.layers.sqrt(
            fluid.layers.reduce_sum(fluid.layers.square(token_gradient)) + epsilon))
        gp = (0.01 * token_gradient) / norm
        gp.stop_gradient = True
        fluid.layers.assign(token_emb + gp, token_emb)
        graph_vars = _model()
        fluid.layers.assign(token_emb - gp, token_emb)
    else:
        graph_vars = _model()
    return pyreader, graph_vars
| 4,645 | 33.932331 | 85 | py |
RocketQA | RocketQA-main/rocketqa/model/dual_encoder_train.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from rocketqa.model.ernie import ErnieModel
def create_train_model(args,
                       pyreader_name,
                       ernie_config,
                       batch_size=16,
                       is_prediction=False,
                       task_name="",
                       fleet_handle=None):
    """Build the RocketQA dual-encoder graph for training or inference.

    The reader feeds, per example, five ERNIE input fields for the query,
    five for the positive paragraph, five for the negative paragraph, plus
    a label and a query id.  The query tower and the paragraph towers are
    separate ERNIE stacks; the positive and negative paragraph towers share
    parameters through the common ``'titlepara_'`` name prefix.

    :param args: namespace providing ``q_max_seq_len``, ``p_max_seq_len``
        and ``use_cross_batch``.
    :param pyreader_name: suffix for the py_reader variable name.
    :param ernie_config: parsed ERNIE config used for all three towers.
    :param batch_size: per-device batch size (the reader shapes are static).
    :param is_prediction: when True, score only (query, positive-para)
        pairs and return their inner products.
    :param task_name: prefix used to namespace the reader.
    :param fleet_handle: distributed fleet handle; together with
        ``args.use_cross_batch`` it enables cross-batch negatives via
        allgather.
    :return: ``(pyreader, graph_vars)``; during training ``graph_vars``
        holds loss/probs/accuracy/..., for prediction it holds probs and
        the raw query/para representations.
    """
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.q_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],
                [batch_size, args.p_max_seq_len, 1],
                [batch_size, 1], [batch_size, 1]],
        dtypes=['int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64', 'int64', 'int64', 'float32',
                'int64', 'int64'],
        lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)

    # Unpack in the same order as the shapes above: query fields, positive
    # paragraph fields, negative paragraph fields, then labels and qids.
    (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,
     src_ids_p_pos, sent_ids_p_pos, pos_ids_p_pos, task_ids_p_pos, input_mask_p_pos,
     src_ids_p_neg, sent_ids_p_neg, pos_ids_p_neg, task_ids_p_neg, input_mask_p_neg,
     labels, qids) = fluid.layers.read_file(pyreader)

    # Query tower: its own parameter prefix.
    ernie_q = ErnieModel(
        src_ids=src_ids_q,
        position_ids=pos_ids_q,
        sentence_ids=sent_ids_q,
        task_ids=task_ids_q,
        input_mask=input_mask_q,
        config=ernie_config,
        model_name='query_')
    ## pos para
    ernie_pos = ErnieModel(
        src_ids=src_ids_p_pos,
        position_ids=pos_ids_p_pos,
        sentence_ids=sent_ids_p_pos,
        task_ids=task_ids_p_pos,
        input_mask=input_mask_p_pos,
        config=ernie_config,
        model_name='titlepara_')
    ## neg para -- same 'titlepara_' prefix, so it shares the pos tower's weights
    ernie_neg = ErnieModel(
        src_ids=src_ids_p_neg,
        position_ids=pos_ids_p_neg,
        sentence_ids=sent_ids_p_neg,
        task_ids=task_ids_p_neg,
        input_mask=input_mask_p_neg,
        config=ernie_config,
        model_name='titlepara_')

    # [CLS] representation of each tower.
    q_cls_feats = ernie_q.get_cls_output()
    pos_cls_feats = ernie_pos.get_cls_output()
    neg_cls_feats = ernie_neg.get_cls_output()

    #src_ids_p_pos = fluid.layers.Print(src_ids_p_pos, message='pos: ')
    #pos_cls_feats = fluid.layers.Print(pos_cls_feats, message='pos: ')
    # Stack positives then negatives: rows [0, batch_size) are the positives.
    p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)

    if is_prediction:
        # Inference scores only the (query, positive-para) pairs:
        # row-wise inner product of the two representations.
        p_cls_feats = fluid.layers.slice(p_cls_feats, axes=[0], starts=[0], ends=[batch_size])
        multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)
        probs = fluid.layers.reduce_sum(multi, dim=-1)
        graph_vars = {
            "probs": probs,
            "q_rep": q_cls_feats,
            "p_rep": p_cls_feats
        }
        return pyreader, graph_vars

    if args.use_cross_batch and fleet_handle is not None:
        # Cross-batch negatives: gather every worker's 2*batch_size paragraph
        # vectors so each query is scored against all of them.
        print("worker num is: {}".format(fleet_handle.worker_num()))
        all_p_cls_feats = fluid.layers.collective._c_allgather(
            p_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)

        #multiply
        logits = fluid.layers.matmul(q_cls_feats, all_p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = fleet_handle.worker_index()
    else:
        logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)
        worker_id = 0
    probs = logits

    # Gold column for row i is this worker's i-th positive: each worker
    # contributes batch_size positives followed by batch_size negatives,
    # so its positives start at column 2 * batch_size * worker_id.
    all_labels = np.array(range(batch_size * worker_id * 2, batch_size * (worker_id * 2 + 1)), dtype='int64')
    matrix_labels = fluid.layers.assign(all_labels)
    matrix_labels = fluid.layers.unsqueeze(matrix_labels, axes=1)
    matrix_labels.stop_gradient=True

    # Listwise softmax cross-entropy over all in-batch (and cross-batch) paras.
    ce_loss = fluid.layers.softmax_with_cross_entropy(
        logits=logits, label=matrix_labels)
    loss = fluid.layers.mean(x=ce_loss)

    num_seqs = fluid.layers.create_tensor(dtype='int64')
    accuracy = fluid.layers.accuracy(
        input=probs, label=matrix_labels, total=num_seqs)

    graph_vars = {
        "loss": loss,
        "probs": probs,
        "accuracy": accuracy,
        "labels": labels,
        "num_seqs": num_seqs,
        "q_rep": q_cls_feats,
        "p_rep": p_cls_feats
    }

    return pyreader, graph_vars
| 5,659 | 36.733333 | 109 | py |
RocketQA | RocketQA-main/rocketqa/model/transformer_encoder.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def multi_head_attention(queries,
                         keys,
                         values,
                         attn_bias,
                         d_key,
                         d_value,
                         d_model,
                         n_head=1,
                         dropout_rate=0.,
                         cache=None,
                         param_initializer=None,
                         name='multi_head_att'):
    """
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing softmax activation to mask certain selected positions so that
    they will not be considered in attention weights.

    When ``keys``/``values`` are None this degenerates to self-attention on
    ``queries``.  ``cache`` (a dict with "k"/"v" entries) enables incremental
    decoding: new keys/values are concatenated onto the cached ones and the
    cache is updated in place.
    """
    keys = queries if keys is None else keys
    values = keys if values is None else values

    if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
        raise ValueError(
            "Inputs: quries, keys and values should all be 3-D tensors.")

    def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
        """
        Add linear projection to queries, keys, and values.
        """
        q = layers.fc(input=queries,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_query_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_query_fc.b_0')
        k = layers.fc(input=keys,
                      size=d_key * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_key_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_key_fc.b_0')
        v = layers.fc(input=values,
                      size=d_value * n_head,
                      num_flatten_dims=2,
                      param_attr=fluid.ParamAttr(
                          name=name + '_value_fc.w_0',
                          initializer=param_initializer),
                      bias_attr=name + '_value_fc.b_0')
        return q, k, v

    def __split_heads(x, n_head):
        """
        Reshape the last dimension of input tensor x so that it becomes two
        dimensions and then transpose. Specifically, input a tensor with shape
        [bs, max_sequence_length, n_head * hidden_dim] then output a tensor
        with shape [bs, n_head, max_sequence_length, hidden_dim].
        """
        hidden_size = x.shape[-1]
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        reshaped = layers.reshape(
            x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)

        # permuate the dimensions into:
        # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
        return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])

    def __combine_heads(x):
        """
        Transpose and then reshape the last two dimensions of input tensor x
        so that it becomes one dimension, which is reverse to __split_heads.
        """
        if len(x.shape) == 3: return x
        if len(x.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")

        trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
        # The value 0 in shape attr means copying the corresponding dimension
        # size of the input as the output dimension size.
        return layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=True)

    def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
        """
        Scaled Dot-Product Attention
        """
        # Scale queries by 1/sqrt(d_key) before the dot product.
        scaled_q = layers.scale(x=q, scale=d_key**-0.5)
        product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if dropout_rate:
            weights = layers.dropout(
                weights,
                dropout_prob=dropout_rate,
                dropout_implementation="upscale_in_train",
                is_test=False)
        out = layers.matmul(weights, v)
        return out

    q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)

    if cache is not None:  # use cache and concat time steps
        # Since the inplace reshape in __split_heads changes the shape of k and
        # v, which is the cache input for next time step, reshape the cache
        # input from the previous time step first.
        k = cache["k"] = layers.concat(
            [layers.reshape(
                cache["k"], shape=[0, 0, d_model]), k], axis=1)
        v = cache["v"] = layers.concat(
            [layers.reshape(
                cache["v"], shape=[0, 0, d_model]), v], axis=1)

    q = __split_heads(q, n_head)
    k = __split_heads(k, n_head)
    v = __split_heads(v, n_head)

    ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
                                                  dropout_rate)

    out = __combine_heads(ctx_multiheads)

    # Project back to the model size.
    proj_out = layers.fc(input=out,
                         size=d_model,
                         num_flatten_dims=2,
                         param_attr=fluid.ParamAttr(
                             name=name + '_output_fc.w_0',
                             initializer=param_initializer),
                         bias_attr=name + '_output_fc.b_0')
    return proj_out
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):
    """
    Position-wise Feed-Forward Networks.
    This module consists of two linear transformations with a ReLU activation
    in between, which is applied to each position separately and identically.

    ``d_inner_hid`` is the expansion width, ``d_hid`` the output width;
    dropout (when ``dropout_rate`` is non-zero) is applied between the two
    projections.
    """
    hidden = layers.fc(input=x,
                       size=d_inner_hid,
                       num_flatten_dims=2,
                       act=hidden_act,
                       param_attr=fluid.ParamAttr(
                           name=name + '_fc_0.w_0',
                           initializer=param_initializer),
                       bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        hidden = layers.dropout(
            hidden,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    out = layers.fc(input=hidden,
                    size=d_hid,
                    num_flatten_dims=2,
                    param_attr=fluid.ParamAttr(
                        name=name + '_fc_1.w_0', initializer=param_initializer),
                    bias_attr=name + '_fc_1.b_0')
    return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0.,
                           name=''):
    """
    Add residual connection, layer normalization and dropout to the out tensor
    optionally according to the value of process_cmd.
    This will be used before or after multi-head attention and position-wise
    feed-forward networks.

    ``process_cmd`` is a string of single-character ops applied in order:
    'a' residual add (skipped when ``prev_out`` is None), 'n' layer norm,
    'd' dropout.
    """
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            out = out + prev_out if prev_out else out
        elif cmd == "n":  # add layer normalization
            # layer_norm is computed in fp32 even for fp16 inputs, with a
            # cast back afterwards.
            out_dtype = out.dtype
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out
# ``pre_process_layer`` fixes ``prev_out=None`` (so the 'a' residual op is a
# no-op); the post-process variant is the same function called with a real
# ``prev_out`` to add the residual.
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
                  attn_bias,
                  n_head,
                  d_key,
                  d_value,
                  d_model,
                  d_inner_hid,
                  prepostprocess_dropout,
                  attention_dropout,
                  relu_dropout,
                  hidden_act,
                  preprocess_cmd="n",
                  postprocess_cmd="da",
                  param_initializer=None,
                  name=''):
    """The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks and both the two components companied
    with the post_process_layer to add residual connection, layer normalization
    and dropout.

    Returns a pair ``(layer_output, ffd_output)``; the second element is the
    raw feed-forward output collected by ``encoder`` as a checkpoint.
    """
    # Self-attention sub-layer with its pre-/post-processing.
    attn_output = multi_head_attention(
        pre_process_layer(
            enc_input,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_att'),
        None,
        None,
        attn_bias,
        d_key,
        d_value,
        d_model,
        n_head,
        attention_dropout,
        param_initializer=param_initializer,
        name=name + '_multi_head_att')
    attn_output = post_process_layer(
        enc_input,
        attn_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_att')
    # Feed-forward sub-layer with its pre-/post-processing.
    ffd_output = positionwise_feed_forward(
        pre_process_layer(
            attn_output,
            preprocess_cmd,
            prepostprocess_dropout,
            name=name + '_pre_ffn'),
        d_inner_hid,
        d_model,
        relu_dropout,
        hidden_act,
        param_initializer=param_initializer,
        name=name + '_ffn')
    return post_process_layer(
        attn_output,
        ffd_output,
        postprocess_cmd,
        prepostprocess_dropout,
        name=name + '_post_ffn'), ffd_output
def encoder(enc_input,
            attn_bias,
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd="n",
            postprocess_cmd="da",
            param_initializer=None,
            model_name='',
            name=''):
    """
    The encoder is composed of a stack of identical layers returned by calling
    encoder_layer.

    Returns ``(enc_output, checkpoints)`` where ``checkpoints`` holds each
    layer's feed-forward output (presumably for gradient recompute /
    checkpointing -- confirm against the training setup).
    """
    checkpoints = []
    for i in range(n_layer):
        enc_output, cp = encoder_layer(
            enc_input,
            attn_bias,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            hidden_act,
            preprocess_cmd,
            postprocess_cmd,
            param_initializer=param_initializer,
            name=name + '_layer_' + str(i))
        checkpoints.append(cp)
        # Output of one layer feeds the next.
        enc_input = enc_output
    # Final pre-process step (e.g. layer norm) applied once after the stack.
    enc_output = pre_process_layer(
        enc_output, preprocess_cmd, prepostprocess_dropout, name=model_name+"post_encoder")

    return enc_output, checkpoints
| 12,649 | 35.666667 | 91 | py |
RocketQA | RocketQA-main/rocketqa/model/__init__.py | 0 | 0 | 0 | py | |
RocketQA | RocketQA-main/rocketqa/model/cross_encoder_train.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from rocketqa.model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_train_model(args,
                       pyreader_name,
                       ernie_config,
                       is_prediction=False,
                       task_name=""):
    """Build the cross-encoder classification graph.

    Training uses a small adversarial perturbation of the word embeddings:
    the model is first built with dropout disabled (``is_noise=True``) to
    obtain a loss, the gradient of that loss w.r.t. the ``word_embedding``
    table is normalized and scaled by 0.01, the perturbation is added to the
    table, the real (dropout-enabled) model is built on the perturbed
    embeddings, and the perturbation is subtracted again afterwards.

    :param args: namespace providing ``max_seq_len`` and ``num_labels``.
    :param pyreader_name: suffix for the py_reader variable name.
    :param ernie_config: parsed ERNIE config.
    :param is_prediction: when True, skip the adversarial pass entirely.
    :param task_name: prefix for the reader and classifier parameter names.
    :return: ``(pyreader, graph_vars)`` with loss/probs/accuracy.
    """
    pyreader = fluid.layers.py_reader(
        capacity=50,
        shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
                [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],
        dtypes=[
            'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
        ],
        lod_levels=[0, 0, 0, 0, 0, 0, 0],
        name=task_name + "_" + pyreader_name,
        use_double_buffer=True)

    (src_ids, sent_ids, pos_ids, task_ids, input_mask, labels,
     qids) = fluid.layers.read_file(pyreader)

    def _model(is_noise=False):
        # One forward pass; is_noise=True disables dropout inside ERNIE so
        # the adversarial gradient is computed on a clean pass.
        ernie = ErnieModel(
            src_ids=src_ids,
            position_ids=pos_ids,
            sentence_ids=sent_ids,
            task_ids=task_ids,
            input_mask=input_mask,
            config=ernie_config,
            is_noise=is_noise)

        cls_feats = ernie.get_pooled_output()
        if not is_noise:
            cls_feats = fluid.layers.dropout(
                x=cls_feats,
                dropout_prob=0.1,
                dropout_implementation="upscale_in_train")
        logits = fluid.layers.fc(
            input=cls_feats,
            size=args.num_labels,
            param_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_w",
                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
            bias_attr=fluid.ParamAttr(
                name=task_name + "_cls_out_b",
                initializer=fluid.initializer.Constant(0.)))

        num_seqs = fluid.layers.create_tensor(dtype='int64')
        ## add focal loss
        ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
            logits=logits, label=labels, return_softmax=True)
        loss = fluid.layers.mean(x=ce_loss)
        accuracy = fluid.layers.accuracy(
            input=probs, label=labels, total=num_seqs)

        graph_vars = {
            "loss": loss,
            "probs": probs,
            "accuracy": accuracy
        }
        return graph_vars

    if not is_prediction:
        # Adversarial pass: perturb the embedding table along the normalized
        # loss gradient, build the real model, then restore the table.
        graph_vars = _model(is_noise=True)
        old_loss = graph_vars["loss"]
        token_emb = fluid.default_main_program().global_block().var("word_embedding")
        # print(token_emb)
        token_emb.stop_gradient = False
        token_gradient = fluid.gradients(old_loss, token_emb)[0]
        token_gradient.stop_gradient = False
        epsilon = 1e-8
        norm = (fluid.layers.sqrt(
            fluid.layers.reduce_sum(fluid.layers.square(token_gradient)) + epsilon))
        gp = (0.01 * token_gradient) / norm
        gp.stop_gradient = True
        fluid.layers.assign(token_emb + gp, token_emb)
        graph_vars = _model()
        fluid.layers.assign(token_emb - gp, token_emb)
    else:
        graph_vars = _model()

    return pyreader, graph_vars
def predict(exe,
            test_program,
            test_pyreader,
            graph_vars,
            dev_count=1):
    """Run the cross-encoder over ``test_pyreader`` and collect predictions.

    :param exe: fluid Executor used to run ``test_program``.
    :param test_program: inference program to execute (used when
        ``dev_count == 1``; for multi-device runs the executor is run
        without an explicit program).
    :param test_pyreader: py_reader feeding evaluation batches.
    :param graph_vars: dict exposing ``"probs"`` and ``"qids"`` variables.
        NOTE(review): ``create_train_model`` above returns a dict without a
        ``"qids"`` entry -- confirm which graph builder this is paired with.
    :param dev_count: number of devices used for inference.
    :return: tuple ``(qids, preds, probs)`` where ``preds`` holds the argmax
        class ids (as float32) and ``probs`` is an ``[n, num_classes]``
        numpy array.
    """
    test_pyreader.start()
    # Fixed an unused local: the original also initialized a dead ``scores``
    # list that was never appended to or returned.
    qids, probs = [], []
    preds = []

    fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name]

    while True:
        try:
            if dev_count == 1:
                np_probs, np_qids = exe.run(program=test_program,
                                            fetch_list=fetch_list)
            else:
                np_probs, np_qids = exe.run(fetch_list=fetch_list)

            if np_qids is None:
                np_qids = np.array([])
            qids.extend(np_qids.reshape(-1).tolist())
            np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
            preds.extend(np_preds)
            probs.append(np_probs)
        except fluid.core.EOFException:
            # Reader exhausted: reset so it can be reused, then stop.
            test_pyreader.reset()
            break

    probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
    return qids, preds, probs
| 5,057 | 32.72 | 85 | py |
RocketQA | RocketQA-main/rocketqa/model/ernie.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import six
import logging
import paddle.fluid as fluid
from io import open
from .transformer_encoder import encoder, pre_process_layer
log = logging.getLogger(__name__)
class ErnieConfig(object):
    """Loads an ERNIE model configuration from a JSON file and exposes it
    with dict-style access."""

    def __init__(self, config_path):
        """Parse the JSON file at ``config_path`` and keep it as a dict."""
        self._config_dict = self._parse(config_path)

    def _parse(self, config_path):
        """Return the parsed JSON mapping; wrap any failure in an IOError."""
        try:
            with open(config_path, 'r', encoding='utf8') as json_file:
                return json.load(json_file)
        except Exception:
            raise IOError("Error in parsing Ernie model config file '%s'" %
                          config_path)

    def __getitem__(self, key):
        """Dict-style access; missing keys yield None instead of raising."""
        return self._config_dict.get(key, None)

    def print_config(self):
        """Log every config entry in alphabetical key order."""
        for arg, value in sorted(self._config_dict.items()):
            log.info('%s: %s' % (arg, value))
        log.info('------------------------------------------------')
class ErnieModel(object):
    """ERNIE transformer encoder graph.

    Builds the embedding layers and the transformer encoder stack at
    construction time, then exposes accessors for the full sequence output,
    the [CLS] vector, the pooled output, and the pretraining (masked-LM and
    auxiliary task) heads.
    """

    def __init__(self,
                 src_ids,
                 position_ids,
                 sentence_ids,
                 task_ids,
                 input_mask,
                 config,
                 weight_sharing=True,
                 model_name='',
                 is_noise=False):
        """
        :param src_ids: token id tensor.
        :param position_ids: position id tensor.
        :param sentence_ids: sentence/segment id tensor.
        :param task_ids: task id tensor (only used when the config enables
            ``use_task_id``).
        :param input_mask: float mask with 1.0 for real tokens, used to build
            the attention bias.
        :param config: ErnieConfig-like mapping of hyperparameters.
        :param weight_sharing: tie the masked-LM output projection to the
            word embedding table.
        :param model_name: prefix for every parameter name; two models built
            with the same prefix share parameters.
        :param is_noise: disable all dropout (used by the cross-encoder's
            adversarial "noise" pass).
        """
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        if config['sent_type_vocab_size']:
            self._sent_types = config['sent_type_vocab_size']
        else:
            self._sent_types = config['type_vocab_size']
        self._use_task_id = config['use_task_id']
        if self._use_task_id:
            self._task_types = config['task_type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        # Noise mode: deterministic forward pass, no dropout anywhere.
        if is_noise:
            self._prepostprocess_dropout = 0
            self._attention_dropout = 0
        self._weight_sharing = weight_sharing
        self.checkpoints = []

        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._task_emb_name = "task_embedding"
        self._emb_dtype = "float32"

        # Initialize all weights by truncated normal initializer, and all biases
        # will be initialized by constant zero by default.
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])

        self._build_model(model_name, src_ids, position_ids, sentence_ids, task_ids,
                          input_mask)

    def _build_model(self, model_name, src_ids, position_ids, sentence_ids, task_ids,
                     input_mask):
        """Build embeddings, the attention bias, and the encoder stack."""
        # padding id in vocabulary must be set to 0
        emb_out = fluid.layers.embedding(
            input=src_ids,
            size=[self._voc_size, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._word_emb_name, initializer=self._param_initializer),
            is_sparse=False)

        position_emb_out = fluid.layers.embedding(
            input=position_ids,
            size=[self._max_position_seq_len, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._pos_emb_name, initializer=self._param_initializer))

        sent_emb_out = fluid.layers.embedding(
            sentence_ids,
            size=[self._sent_types, self._emb_size],
            dtype=self._emb_dtype,
            param_attr=fluid.ParamAttr(
                name=model_name + self._sent_emb_name, initializer=self._param_initializer))

        # Sum word + position + sentence (+ optional task) embeddings.
        emb_out = emb_out + position_emb_out
        emb_out = emb_out + sent_emb_out

        if self._use_task_id:
            task_emb_out = fluid.layers.embedding(
                task_ids,
                size=[self._task_types, self._emb_size],
                dtype=self._emb_dtype,
                param_attr=fluid.ParamAttr(
                    name=model_name + self._task_emb_name,
                    initializer=self._param_initializer))

            emb_out = emb_out + task_emb_out

        # 'nd': layer-norm then dropout on the summed embeddings.
        emb_out = pre_process_layer(
            emb_out, 'nd', self._prepostprocess_dropout, name=model_name + 'pre_encoder')

        # Pairwise mask product -> additive attention bias: padded positions
        # end up with a large negative value after scaling.
        self_attn_mask = fluid.layers.matmul(
            x=input_mask, y=input_mask, transpose_y=True)

        self_attn_mask = fluid.layers.scale(
            x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
        n_head_self_attn_mask = fluid.layers.stack(
            x=[self_attn_mask] * self._n_head, axis=1)
        n_head_self_attn_mask.stop_gradient = True

        self._enc_out, self.checkpoints = encoder(
            enc_input=emb_out,
            attn_bias=n_head_self_attn_mask,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            hidden_act=self._hidden_act,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer,
            model_name=model_name,
            name=model_name+'encoder')

    def get_sequence_output(self):
        """Return the full per-token encoder output."""
        return self._enc_out

    def get_cls_output(self):
        """Get the first feature of each sequence for classification"""
        cls_output = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        cls_output = fluid.layers.squeeze(cls_output, axes=[1])
        return cls_output

    def get_pooled_output(self, joint_training=0):
        """Get the first feature of each sequence for classification.

        With ``joint_training == 0`` the [CLS] vector is passed through the
        tanh 'pooled_fc' projection; otherwise the raw [CLS] vector is
        returned (squeezed).
        """
        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        if joint_training == 0:
            next_sent_feat = fluid.layers.fc(
                input=next_sent_feat,
                size=self._emb_size,
                act="tanh",
                param_attr=fluid.ParamAttr(
                    name="pooled_fc.w_0", initializer=self._param_initializer),
                bias_attr="pooled_fc.b_0")
        else:
            next_sent_feat = fluid.layers.squeeze(next_sent_feat, axes=[1])

        return next_sent_feat

    def get_lm_output(self, mask_label, mask_pos):
        """Get the loss & accuracy for pretraining"""
        mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')

        # extract the first token feature in each sentence
        self.next_sent_feat = self.get_pooled_output()
        reshaped_emb_out = fluid.layers.reshape(
            x=self._enc_out, shape=[-1, self._emb_size])
        # extract masked tokens' feature
        mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)

        # transform: fc
        mask_trans_feat = fluid.layers.fc(
            input=mask_feat,
            size=self._emb_size,
            act=self._hidden_act,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_fc.w_0',
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))

        # transform: layer norm
        # NOTE(review): the layer-norm bias is initialized to 1.0 (not the
        # usual 0.0); this matches upstream ERNIE and is irrelevant when
        # loading pretrained weights -- confirm before training from scratch.
        mask_trans_feat = fluid.layers.layer_norm(
            mask_trans_feat,
            begin_norm_axis=len(mask_trans_feat.shape) - 1,
            param_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_scale',
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                name='mask_lm_trans_layer_norm_bias',
                initializer=fluid.initializer.Constant(1.)))
        # transform: layer norm
        #mask_trans_feat = pre_process_layer(
        #    mask_trans_feat, 'n', name='mask_lm_trans')

        mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))
        if self._weight_sharing:
            # Tie the output projection to the word embedding table.
            fc_out = fluid.layers.matmul(
                x=mask_trans_feat,
                y=fluid.default_main_program().global_block().var(
                    self._word_emb_name),
                transpose_y=True)
            fc_out += fluid.layers.create_parameter(
                shape=[self._voc_size],
                dtype=self._emb_dtype,
                attr=mask_lm_out_bias_attr,
                is_bias=True)

        else:
            fc_out = fluid.layers.fc(input=mask_trans_feat,
                                     size=self._voc_size,
                                     param_attr=fluid.ParamAttr(
                                         name="mask_lm_out_fc.w_0",
                                         initializer=self._param_initializer),
                                     bias_attr=mask_lm_out_bias_attr)

        mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
            logits=fc_out, label=mask_label)
        mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)

        return mean_mask_lm_loss

    def get_task_output(self, task, task_labels):
        """Loss and accuracy for an auxiliary classification task head.

        ``task`` is a dict providing ``"num_labels"`` and ``"task_name"``.
        """
        task_fc_out = fluid.layers.fc(
            input=self.next_sent_feat,
            size=task["num_labels"],
            param_attr=fluid.ParamAttr(
                name=task["task_name"] + "_fc.w_0",
                initializer=self._param_initializer),
            bias_attr=task["task_name"] + "_fc.b_0")
        task_loss, task_softmax = fluid.layers.softmax_with_cross_entropy(
            logits=task_fc_out, label=task_labels, return_softmax=True)
        task_acc = fluid.layers.accuracy(input=task_softmax, label=task_labels)
        mean_task_loss = fluid.layers.mean(task_loss)
        return mean_task_loss, task_acc
| 11,019 | 38.783394 | 92 | py |
RocketQA | RocketQA-main/examples/example.py | import os
import sys
import rocketqa
def train_dual_encoder(base_model, train_set):
    """Fine-tune a RocketQA dual-encoder from ``base_model`` on ``train_set``."""
    encoder = rocketqa.load_model(
        model=base_model, use_cuda=True, device_id=5, batch_size=16)
    encoder.train(
        train_set, 2, 'de_en_models',
        save_steps=10, learning_rate=1e-5, log_folder='de_en_log')
def train_cross_encoder(base_model, train_set):
    """Fine-tune a RocketQA cross-encoder from ``base_model`` on ``train_set``."""
    encoder = rocketqa.load_model(
        model=base_model, use_cuda=True, device_id=5, batch_size=16)
    encoder.train(
        train_set, 2, 'ce_en_models',
        save_steps=10, learning_rate=1e-5, log_folder='ce_en_log')
def test_dual_encoder(model, q_file, tp_file):
    """Exercise a dual-encoder: embed queries and paragraphs, score pairs.

    ``q_file`` holds one query per line; ``tp_file`` holds tab-separated
    title/para lines.  Embeddings and inner-product scores are printed to
    stdout.

    Fix: the original opened both input files without ever closing them;
    ``with`` blocks now guarantee the handles are released.
    """
    with open(q_file) as fq:
        query_list = [line.strip() for line in fq]
    para_list = []
    title_list = []
    with open(tp_file) as ftp:
        for line in ftp:
            t, p = line.strip().split('\t')
            para_list.append(p)
            title_list.append(t)

    dual_encoder = rocketqa.load_model(model=model, use_cuda=True, device_id=1, batch_size=32)

    q_embs = dual_encoder.encode_query(query=query_list)
    for q in q_embs:
        print(' '.join(str(ii) for ii in q))
    p_embs = dual_encoder.encode_para(para=para_list, title=title_list)
    for p in p_embs:
        print(' '.join(str(ii) for ii in p))
    # Score only as many (title, para) pairs as there are queries.
    ips = dual_encoder.matching(query=query_list,
                                para=para_list[:len(query_list)],
                                title=title_list[:len(query_list)])
    for ip in ips:
        print(ip)
def test_cross_encoder(model, q_file, tp_file):
    """Exercise a cross-encoder: score query/paragraph pairs and print them.

    ``q_file`` holds one query per line; ``tp_file`` holds tab-separated
    title/para lines.

    Fix: the original opened both input files without ever closing them;
    ``with`` blocks now guarantee the handles are released.
    """
    with open(q_file) as fq:
        query_list = [line.strip() for line in fq]
    para_list = []
    title_list = []
    with open(tp_file) as ftp:
        for line in ftp:
            t, p = line.strip().split('\t')
            para_list.append(p)
            title_list.append(t)

    cross_encoder = rocketqa.load_model(model=model, use_cuda=True, device_id=0, batch_size=32)
    # Score only as many (title, para) pairs as there are queries.
    ranking_score = cross_encoder.matching(query=query_list,
                                           para=para_list[:len(query_list)],
                                           title=title_list[:len(query_list)])
    for rs in ranking_score:
        print(rs)
if __name__ == "__main__":
    # Fine-tune a model on the bundled toy data (uncomment the variant you need).
    train_dual_encoder('zh_dureader_de', './examples/data/dual.train.tsv')
    # train_cross_encoder('zh_dureader_ce', './examples/data/cross.train.tsv')

    # Run inference with a released RocketQA model.
    #test_dual_encoder('zh_dureader_de_v2', './data/dureader.q', './data/marco.tp.1k')
    #test_cross_encoder('zh_dureader_de_v2', './data/dureader.q', './data/marco.tp.1k')

    # Run inference with a locally fine-tuned model via its config.json.
    # test_dual_encoder('./de_models/config.json', './data/dureader.q', './data/marco.tp.1k')
    #test_cross_encoder('./ce_models/config.json', './data/dureader.q', './data/marco.tp.1k')
| 2,742 | 35.573333 | 112 | py |
RocketQA | RocketQA-main/examples/jina3_example/app.py | import sys
import os
import webbrowser
from pathlib import Path
from docarray import Document,DocumentArray
from jina import Flow
from quart import Quart,render_template
def config():
    """Seed the process environment with the demo's default settings.

    Values already present in the environment are left untouched
    (``setdefault`` semantics).
    """
    defaults = {
        'JINA_USE_CUDA': 'False',
        'JINA_PORT_EXPOSE': '8886',
        'JINA_WORKSPACE': './workspace',
        # Public IP of the demo host.
        'public_ip': '1.1.1.1',
        # Public port of the demo host; make sure it is open and not in use.
        'public_port': '1935',
    }
    for key, value in defaults.items():
        os.environ.setdefault(key, value)
def index(file_name):
    """Index the tab-separated title/para corpus at ``file_name``.

    Each well-formed line becomes a Document with title and para stored in
    its ``tags``; malformed lines are reported and skipped.
    """
    def load_marco(fn):
        """Build a DocumentArray from the TSV corpus at ``fn``."""
        cnt = 0
        docs = DocumentArray()
        with open(fn, 'r') as f:
            for ln, line in enumerate(f):
                try:
                    title, para = line.strip().split('\t')
                    doc = Document(
                        id=f'{cnt}',
                        uri=fn,
                        tags={'title': title, 'para': para})
                    cnt += 1
                    docs.append(doc)
                except Exception:
                    # Fix: was a bare ``except:``, which would also swallow
                    # KeyboardInterrupt/SystemExit.  Still best-effort:
                    # malformed lines are skipped, not fatal.
                    print(f'skip line {ln}')
                    continue
        return docs

    f = Flow().load_config('flows/index.yml')
    with f:
        f.post(on='/index', inputs=load_marco(file_name), show_progress=True,
               request_size=32, return_response=True)
def fillin_html():
    """Render ``static/index.html`` from its template.

    Replaces the ``{% JINA_PORT_EXPOSE %}`` placeholder with the current
    value of the ``JINA_PORT_EXPOSE`` environment variable.
    """
    static_dir = Path(__file__).parent.absolute() / 'static'
    template = (static_dir / 'index_template.html').read_text()
    rendered = template.replace('{% JINA_PORT_EXPOSE %}',
                                f'{os.environ.get("JINA_PORT_EXPOSE")}')
    (static_dir / 'index.html').write_text(rendered)
def query():
    """Serve the query Flow and open the HTML demo page in a browser."""
    from distutils.dir_util import copy_tree
    fillin_html()
    # Expose the rendered static assets inside the Flow workspace.
    copy_tree('static', 'workspace/static')
    url_html_fn = Path(__file__).parent.absolute() / 'workspace/static/index.html'
    url_html_path = f'file://{url_html_fn}'
    f = Flow().load_config('flows/query.yml')
    with f:
        try:
            webbrowser.open(url_html_path, new=2)
        except:
            # Best effort: a missing browser should not kill the server.
            pass
        finally:
            # NOTE(review): the two f-string pieces concatenate without a
            # space or newline between "browser" and "if".
            print(f'You should see a demo page opened in your browser'
                  f'if not, you may open {url_html_path} manually')
        # Block until the Flow is terminated.
        f.block()
def query_cli():
    """Interactive command-line search loop against the query Flow."""
    def print_topk(resp):
        # Print each response Document followed by its top matches' tags.
        for doc in resp.docs:
            print(doc)
            doc = Document(doc)
            print(f'🤖 Answers:')
            for m in doc.matches:
                print(f'\t{m.tags["title"]}')
                print(f'\t{m.tags["para"]}')
                print(f'-----')

    f = Flow().load_config('flows/query.yml')
    with f:
        f.protocol = 'grpc'
        print(f'🤖 Hi there, please ask me questions related to the indexed Documents.\n'
              'For example, "Who is Paula Deen\'s brother?"\n')
        while True:
            # Empty input or the literal \q ends the session.
            text = input('Question: (type `\q` to quit)')
            if text == '\q' or not text:
                return
            f.post(on='/search', inputs=[Document(content=text), ], on_done=print_topk)
# To run this command, first `pip3 install quart`.
def query_web():
    """Serve the query Flow and a Quart web front-end on the public port."""
    from distutils.dir_util import copy_tree
    copy_tree('static', 'workspace/static')
    public_port = os.environ.get("public_port")
    public_ip = os.environ.get("public_ip")
    # Address the browser page uses to reach the Jina gateway.
    jina_server_addr = f'http://{public_ip}:{os.environ.get("JINA_PORT_EXPOSE")}'
    f = Flow().load_config('flows/query.yml')
    with f:
        try:
            app2=Quart(__name__,template_folder='workspace/templates',
               static_folder='workspace/static')
            # NOTE(review): this nested handler shadows the module-level
            # ``index`` function inside this scope.
            @app2.route('/')
            async def index():
                return await render_template('index.html',jina_server_addr=jina_server_addr)
            app2.run(debug=True,port=public_port)
        except:
            # Best effort: keep serving the Flow even if Quart fails.
            pass
        finally:
            # NOTE(review): the two f-string pieces concatenate without a
            # space or newline between "browser" and "if".
            print(f'You should see a demo page opened in your browser'
                  f'if not, you may open {jina_server_addr} manually')
        # Block until the Flow is terminated.
        f.block()
def main(task):
    """Dispatch the CLI ``task`` (index / query / query_cli / query_web)."""
    config()
    if task == 'index':
        if Path('./workspace').exists():
            print('./workspace exists, please deleted it if you want to reindexi')
        if len(sys.argv) >= 3:
            data_fn = sys.argv[2]
        else:
            data_fn = 'toy_data/test.tsv'
        print(f'indexing {data_fn}')
        index(data_fn)
        return
    handlers = {
        'query': query,
        'query_cli': query_cli,
        'query_web': query_web,
    }
    handler = handlers.get(task)
    if handler is not None:
        handler()
if __name__ == '__main__':
    # Usage: python app.py <index|query|query_cli|query_web> [data_file]
    task = sys.argv[1]
    main(task)
| 4,462 | 32.810606 | 115 | py |
RocketQA | RocketQA-main/examples/jina3_example/rocketqa_encoder/executor.py | from jina import Executor, requests
from docarray import Document,DocumentArray
import numpy as np
import rocketqa
class RocketQADualEncoder(Executor):
    """
    Calculate the `embedding` of the passages and questions with RocketQA Dual-Encoder models.
    """
    def __init__(self, model, use_cuda=False, device_id=0, batch_size=1, *args, **kwargs):
        """
        :param model: A model name returned by `rocketqa.available_models()` or the
            path of a user-specified checkpoint config.
        :param use_cuda: Set to `True` (default: `False`) to use GPU.
        :param device_id: The GPU device id to load the model. Set to integers
            starting from 0 to `N`, where `N` is the number of GPUs minus 1.
        :param batch_size: the batch size during inference.
        """
        super().__init__(*args, **kwargs)
        self.encoder = rocketqa.load_model(model=model, use_cuda=use_cuda, device_id=device_id, batch_size=batch_size)
        # Also reused as the DocumentArray batching size at index time.
        self.b_s = batch_size

    @requests(on='/index')
    def encode_passage(self, docs, **kwargs):
        """Embed each root Document's `tags['title']`/`tags['para']` in batches."""
        batch_generator =docs['@r'].batch(batch_size=self.b_s)
        for batch in batch_generator:
            titles, paras = batch[:,('tags__title','tags__para')]
            para_embs = self.encoder.encode_para(para=paras, title=titles)
            for doc, emb in zip(batch, para_embs):
                doc.embedding = emb.squeeze()

    @requests(on='/search')
    def encode_question(self, docs, **kwargs):
        """Embed each query Document's `text`, one Document at a time."""
        for doc in docs:
            query_emb = self.encoder.encode_query(query=[doc.text])
            # encode_query yields embeddings; materialize to a numpy array.
            query_emb = np.array(list(query_emb))
            doc.embedding = query_emb.squeeze()
| 1,651 | 42.473684 | 118 | py |
RocketQA | RocketQA-main/examples/jina3_example/rocketqa_reranker/executor.py | import numpy as np
import rocketqa
from jina import Executor, requests
from docarray import Document, DocumentArray
from docarray.score import NamedScore
class RocketQAReranker(Executor):
    """
    Re-rank the `matches` of a Document based on the relevance to the question stored in the `text` field with RocketQA matching model.
    """
    def __init__(self, model, use_cuda=False, device_id=0, batch_size=1, *args, **kwargs):
        """
        :param model: A model name return by `rocketqa.available_models()` or the path of an user-specified checkpoint config
        :param use_cuda: Set to `True` (default: `False`) to use GPU
        :param device_id: The GPU device id to load the model. Set to integers starting from 0 to `N`, where `N` is the number of GPUs minus 1.
        :param batch_size: the batch size during inference.
        """
        super().__init__(*args, **kwargs)
        self.encoder = rocketqa.load_model(model=model, use_cuda=use_cuda, device_id=device_id, batch_size=batch_size)
        self.b_s = batch_size
    @requests(on='/search')
    def rank(self, docs, **kwargs):
        # For each query Document, score all its matches and rebuild `doc.matches`
        # sorted by descending cross-encoder relevance.
        for doc in docs:
            question = doc.text
            doc_arr = DocumentArray([doc])
            # '@m' selects the match Documents; they are scored in batches.
            match_batches_generator = (doc_arr['@m']
                                       .batch(batch_size=self.b_s))
            reranked_matches = DocumentArray()
            reranked_scores = []
            unsorted_matches = DocumentArray()
            for matches in match_batches_generator:
                titles, paras = matches[:,('tags__title', 'tags__para')]
                # The same question is paired with every passage in the batch.
                score_list = self.encoder.matching(query=[question] * len(paras), para=paras, title=titles)
                reranked_scores.extend(score_list)
                unsorted_matches += list(matches)
            # argsort is ascending, so reverse to get highest score first.
            sorted_args = np.argsort(reranked_scores).tolist()
            sorted_args.reverse()
            for idx in sorted_args:
                score = reranked_scores[idx]
                # Rebuild a slim match carrying only id, title/para tags and the score.
                m = Document(
                    id=unsorted_matches[idx].id,
                    tags={
                        'title': unsorted_matches[idx].tags['title'],
                        'para': unsorted_matches[idx].tags['para']
                    }
                )
                m.scores['relevance'] = NamedScore(value=score)
                reranked_matches.append(m)
            doc.matches = reranked_matches
| 2,407 | 43.592593 | 143 | py |
RocketQA | RocketQA-main/examples/es_example/query.py | # -*- coding: utf-8 -*-
import sys
import time
import numpy as np
import rocketqa
from elasticsearch import Elasticsearch
class Querier:
    """Retrieve candidate passages from Elasticsearch and re-rank them with RocketQA."""
    def __init__(self, es_client, index_name, de_model, ce_model):
        """
        :param es_client: an Elasticsearch client instance.
        :param index_name: the ES index holding the passage vectors.
        :param de_model: RocketQA dual-encoder model name (query/passage embedding).
        :param ce_model: RocketQA cross-encoder model name (query-passage matching).
        """
        self.es_client = es_client
        self.index_name = index_name
        self.dual_encoder = rocketqa.load_model(
            model=de_model,
            use_cuda=False,  # GPU: True
            device_id=0,
            batch_size=32,
        )
        self.cross_encoder = rocketqa.load_model(
            model=ce_model,
            use_cuda=False,  # GPU: True
            device_id=0,
            batch_size=32,
        )
    def encode(self, query):
        """Return the unit-normalized dual-encoder embedding for one query string."""
        embs = self.dual_encoder.encode_query(query=[query])
        vector = list(embs)[0]
        # Normalize the NumPy array to a unit vector to use `dot_product` similarity,
        # see https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-params.
        vector = vector / np.linalg.norm(vector)
        return vector
    def search(self, query, topk=10):
        """Run an ES kNN search and return up to `topk` candidate dicts (title, para)."""
        vector = self.encode(query)
        knn = dict(
            field="vector",
            query_vector=vector,
            k=topk,
            num_candidates=100,
        )
        result = self.es_client.knn_search(index=self.index_name, knn=knn)
        candidates = [
            dict(
                title=doc['_source']['title'],
                para=doc['_source']['paragraph'],
            )
            for doc in result['hits']['hits']
        ]
        return candidates
    def sort(self, query, candidates):
        """Score candidates with the cross-encoder; return them sorted by score, highest first."""
        queries = [query] * len(candidates)
        titles = [c['title'] for c in candidates]
        paras = [c['para'] for c in candidates]
        ranking_score = self.cross_encoder.matching(query=queries, para=paras, title=titles)
        answers = [
            dict(
                title=titles[i],
                para=paras[i],
                score=score,
            )
            for i, score in enumerate(ranking_score)
        ]
        return sorted(answers, key=lambda a: a['score'], reverse=True)
def main():
    """Interactive retrieve-then-rerank loop against a local Elasticsearch."""
    client = Elasticsearch(
        "https://localhost:9200",
        http_auth=("elastic", "123456"),
        verify_certs=False,
    )
    qa = Querier(client, "test-index", 'zh_dureader_de_v2', 'zh_dureader_ce_v2')
    while True:
        question = input('Query: ')
        # First-stage retrieval via ES kNN search.
        hits = qa.search(question)
        print('Candidates:')
        for hit in hits:
            print(hit['title'], '\t', hit['para'])
        # Second-stage re-ranking with the cross-encoder.
        print('Answers:')
        for ans in qa.sort(question, hits):
            print(ans['title'], '\t', ans['para'], '\t', ans['score'])
# Script entry point: start the interactive query loop.
if __name__ == '__main__':
    main()
| 2,772 | 27.885417 | 116 | py |
RocketQA | RocketQA-main/examples/es_example/index.py | # -*- coding: utf-8 -*-
import argparse
import os
import sys
import faiss
import numpy as np
import rocketqa
from elasticsearch import Elasticsearch, helpers
class Indexer:
    """Encode (title, paragraph) pairs with RocketQA and bulk-index them into Elasticsearch."""
    def __init__(self, es_client, index_name, model):
        """
        :param es_client: an Elasticsearch client instance.
        :param index_name: the ES index to write documents into.
        :param model: RocketQA dual-encoder model name.
        """
        self.es_client = es_client
        self.index_name = index_name
        self.dual_encoder = rocketqa.load_model(
            model=model,
            use_cuda=False,  # GPU: True
            device_id=0,
            batch_size=32,
        )
    def index(self, tps):
        """Embed and bulk-index `tps`, a sequence of (title, paragraph) tuples.

        Returns whatever `elasticsearch.helpers.bulk` returns (success count, errors).
        """
        titles, paras = zip(*tps)
        embs = self.dual_encoder.encode_para(para=paras, title=titles)
        def gen_actions():
            # Lazily build one bulk action per passage; ids are 1-based.
            for i, emb in enumerate(embs):
                # Normalize the NumPy array to a unit vector to use `dot_product` similarity,
                # see https://www.elastic.co/guide/en/elasticsearch/reference/current/dense-vector.html#dense-vector-params.
                emb = emb / np.linalg.norm(emb)
                yield dict(
                    _index=self.index_name,
                    _id=i+1,
                    _source=dict(
                        title=titles[i],
                        paragraph=paras[i],
                        vector=emb,
                    ),
                )
        return helpers.bulk(self.es_client, gen_actions())
def main():
    """Parse CLI args, read (title, para) pairs from a TSV file and bulk-index them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('lang', choices=['zh', 'en'], help='The language')
    parser.add_argument('data_file', help='The data file')
    parser.add_argument('index_name', help='The index name')
    args = parser.parse_args()
    # argparse `choices` guarantees lang is one of these two keys.
    model = {'zh': 'zh_dureader_de_v2', 'en': 'v1_marco_de'}[args.lang]
    with open(args.data_file) as fin:
        pairs = [ln.strip().split('\t') for ln in fin]
    client = Elasticsearch(
        "https://localhost:9200",
        http_auth=("elastic", "123456"),
        verify_certs=False,
    )
    print(Indexer(client, args.index_name, model).index(pairs))
# Script entry point: build the index from the CLI arguments.
if __name__ == '__main__':
    main()
| 2,088 | 27.616438 | 124 | py |
RocketQA | RocketQA-main/examples/faiss_example/rocketqa_service.py | import os
import sys
import json
import faiss
import numpy as np
from tornado import web
from tornado import ioloop
import rocketqa
class FaissTool():
    """Thin wrapper pairing a Faiss index with the passage texts it was built from."""

    def __init__(self, text_filename, index_filename):
        """
        :param text_filename: file with one passage text per line, in the same
            order the index was built.
        :param index_filename: path of the serialized Faiss index.
        """
        self.engine = faiss.read_index(index_filename)
        # fix: close the text file instead of leaking the handle
        with open(text_filename) as fin:
            self.id2text = [line.strip() for line in fin]

    def search(self, q_embs, topk=5):
        """Return the texts of the `topk` nearest passages for the first query in `q_embs`."""
        res_dist, res_pid = self.engine.search(q_embs, topk)
        # Only the first query's neighbours are used (queries arrive one at a time).
        return [self.id2text[pid] for pid in res_pid[0][:topk]]
class RocketQAServer(web.RequestHandler):
    """Tornado handler: retrieve passages via Faiss, re-rank with a cross-encoder, reply as JSON."""
    def __init__(self, application, request, **kwargs):
        web.RequestHandler.__init__(self, application, request)
        # Shared components injected through the Application's handler kwargs.
        self._faiss_tool = kwargs["faiss_tool"]
        self._dual_encoder = kwargs["dual_encoder"]
        self._cross_encoder = kwargs["cross_encoder"]
    def get(self):
        """
        Get request (intentionally a no-op; only POST is served).
        """
    def post(self):
        """Handle a JSON POST {"query": ..., "topk": ...} and write ranked answers.

        Response schema: {"error_code", "error_message", "answer": [{"probability",
        "title", "para"}, ...]} with error codes 1 (empty body), 2 (bad JSON),
        3 (missing query field).
        """
        input_request = self.request.body
        output = {}
        output['error_code'] = 0
        output['error_message'] = ''
        output['answer'] = []
        if input_request is None:
            output['error_code'] = 1
            output['error_message'] = "Input is empty"
            self.write(json.dumps(output))
            return
        try:
            input_data = json.loads(input_request)
        except:
            output['error_code'] = 2
            output['error_message'] = "Load input request error"
            self.write(json.dumps(output))
            return
        if "query" not in input_data:
            output['error_code'] = 3
            output['error_message'] = "[Query] is missing"
            self.write(json.dumps(output))
            return
        query = input_data['query']
        # Default number of answers when the client does not ask for a specific topk.
        topk = 5
        if "topk" in input_data:
            topk = input_data['topk']
        # encode query
        q_embs = self._dual_encoder.encode_query(query=[query])
        q_embs = np.array(list(q_embs))
        # search with faiss
        search_result = self._faiss_tool.search(q_embs, topk)
        # Each retrieved entry is a "title\tpara" line; split it back apart.
        titles = []
        paras = []
        queries = []
        for t_p in search_result:
            queries.append(query)
            t, p = t_p.split('\t')
            titles.append(t)
            paras.append(p)
        ranking_score = self._cross_encoder.matching(query=queries, para=paras, title=titles)
        ranking_score = list(ranking_score)
        # Key candidates by "query\ttitle\tpara" so they can be sorted by score.
        final_result = {}
        for i in range(len(paras)):
            final_result[query + '\t' + titles[i] + '\t' + paras[i]] = ranking_score[i]
        sort_res = sorted(final_result.items(), key=lambda a:a[1], reverse=True)
        for qtp, score in sort_res:
            one_answer = {}
            one_answer['probability'] = score
            q, t, p = qtp.split('\t')
            one_answer['title'] = t
            one_answer['para'] = p
            output['answer'].append(one_answer)
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable in the response.
        result_str = json.dumps(output, ensure_ascii=False)
        self.write(result_str)
def create_rocketqa_app(sub_address, rocketqa_server, language, data_file, index_file):
    """Build the Tornado application serving RocketQA retrieval + re-ranking.

    :param sub_address: URL path the handler is mounted on.
    :param rocketqa_server: the RequestHandler class to mount.
    :param language: 'zh' selects the DuReader models, anything else the MSMARCO ones.
    :param data_file: passage text file backing the Faiss index.
    :param index_file: serialized Faiss index file.
    """
    if language == 'zh':
        de_model, ce_model = 'zh_dureader_de_v2', 'zh_dureader_ce_v2'
    else:
        de_model, ce_model = 'v1_marco_de', 'v1_marco_ce'
    # Both encoders share the same runtime settings.
    dual_encoder = rocketqa.load_model(model=de_model, use_cuda=True, device_id=0, batch_size=32)
    cross_encoder = rocketqa.load_model(model=ce_model, use_cuda=True, device_id=0, batch_size=32)
    faiss_tool = FaissTool(data_file, index_file)
    print ('Load index done')
    handler_kwargs = dict(faiss_tool=faiss_tool,
                          dual_encoder=dual_encoder,
                          cross_encoder=cross_encoder)
    return web.Application([(sub_address, rocketqa_server, handler_kwargs)])
if __name__ == "__main__":
if len(sys.argv) != 4:
print ("USAGE: ")
print (" python3 rocketqa_service.py ${language} ${data_file} ${index_file}")
print ("--For Example:")
print (" python3 rocketqa_service.py zh ../data/dureader.para test.index")
exit()
language = sys.argv[1]
if language != 'en' and language != 'zh':
print ("illegal language, only [zh] and [en] is supported", file=sys.stderr)
exit()
data_file = sys.argv[2]
index_file = sys.argv[3]
sub_address = r'/rocketqa'
port = '8888'
app = create_rocketqa_app(sub_address, RocketQAServer, language, data_file, index_file)
app.listen(port)
ioloop.IOLoop.current().start()
| 4,878 | 28.932515 | 93 | py |
RocketQA | RocketQA-main/examples/faiss_example/query.py | import sys
import requests
import json
SERVICE_ADD = 'http://localhost:8888/rocketqa'
TOPK = 5
# Interactive client: POST each query to the RocketQA service and print the top-K answers.
while True:
    query = input("please input a query:\t")
    # An empty (or whitespace-only) line ends the session.
    if query.strip() == '':
        break
    # Note: the original built an unused `json_str`; `requests` serializes `json=` itself.
    input_data = {'query': query, 'topk': TOPK}
    result = requests.post(SERVICE_ADD, json=input_data)
    res_json = json.loads(result.text)
    print ("QUERY:\t" + query)
    # Slice defensively in case the service returns fewer than TOPK answers.
    for i, answer in enumerate(res_json['answer'][:TOPK], start=1):
        print ('{}'.format(i) + '\t' + answer['title'] + '\t'
               + answer['para'] + '\t' + str(answer['probability']))
| 699 | 24 | 83 | py |
RocketQA | RocketQA-main/examples/faiss_example/index.py | import os
import sys
import numpy as np
import faiss
import rocketqa
def build_index(encoder_conf, index_file_name, title_list, para_list):
    """Encode passages with a RocketQA dual encoder and persist a Faiss inner-product index.

    :param encoder_conf: kwargs forwarded to ``rocketqa.load_model``.
    :param index_file_name: destination path for the serialized index.
    :param title_list: passage titles, parallel to ``para_list``.
    :param para_list: passage bodies.
    """
    encoder = rocketqa.load_model(**encoder_conf)
    embeddings = np.array(list(encoder.encode_para(para=para_list, title=title_list)))
    print("Building index with Faiss...")
    # 768 matches the dimensionality of the dual-encoder output vectors.
    index = faiss.IndexFlatIP(768)
    index.add(embeddings.astype('float32'))
    faiss.write_index(index, index_file_name)
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print ("USAGE: ")
        print ("    python3 index.py ${language} ${data_file} ${index_file}")
        print ("--For Example:")
        print ("    python3 index.py zh ../data/dureader.para test.index")
        exit()
    language = sys.argv[1]
    data_file = sys.argv[2]
    index_file = sys.argv[3]
    if language == 'zh':
        model = 'zh_dureader_de_v2'
    elif language == 'en':
        model = 'v1_marco_de'
    else:
        print ("illegal language, only [zh] and [en] is supported", file=sys.stderr)
        exit()
    # Each input line is "title<TAB>para".
    # fix: close the data file instead of leaking the handle
    para_list = []
    title_list = []
    with open(data_file) as fin:
        for line in fin:
            t, p = line.strip().split('\t')
            para_list.append(p)
            title_list.append(t)
    de_conf = {
        "model": model,
        "use_cuda": True,
        "device_id": 0,
        "batch_size": 32
    }
    build_index(de_conf, index_file, title_list, para_list)
| 1,456 | 26.490566 | 84 | py |
RocketQA | RocketQA-main/examples/jina_example/app.py | import sys
import os
import webbrowser
from pathlib import Path
from jina import Document, Flow
def config():
    """Seed default Jina settings without overriding values already in the environment."""
    defaults = {
        'JINA_USE_CUDA': 'False',
        'JINA_PORT_EXPOSE': '8886',
        'JINA_WORKSPACE': './workspace',
    }
    for key, value in defaults.items():
        os.environ.setdefault(key, value)
def index(file_name):
    """Index an MS MARCO style TSV file (title<TAB>para per line) through the index Flow."""
    def load_marco(fn):
        # Generator of Documents; ids count only successfully parsed lines.
        cnt = 0
        with open(fn, 'r') as f:
            for ln, line in enumerate(f):
                try:
                    title, para = line.strip().split('\t')
                    doc = Document(
                        id=f'{cnt}',
                        uri=fn,
                        tags={'title': title, 'para': para})
                    cnt += 1
                    yield doc
                except Exception:
                    # fix: the bare `except:` also caught GeneratorExit when the
                    # generator was closed early, making it continue past close;
                    # only ordinary errors (e.g. a malformed line) should skip.
                    print(f'skip line {ln}')
                    continue
    f = Flow().load_config('flows/index.yml')
    with f:
        f.post(on='/index', inputs=load_marco(file_name), show_progress=True, request_size=32)
def fillin_html():
    """Render static/index.html from its template by substituting the gateway port."""
    static_dir = Path(__file__).parent.absolute() / 'static'
    template = (static_dir / 'index_template.html').read_text()
    rendered = template.replace('{% JINA_PORT_EXPOSE %}',
                                f'{os.environ.get("JINA_PORT_EXPOSE")}')
    (static_dir / 'index.html').write_text(rendered)
def query():
    """Open the browser demo page and keep the query Flow running until interrupted."""
    from distutils.dir_util import copy_tree
    fillin_html()
    copy_tree('static', 'workspace/static')
    url_html_fn = Path(__file__).parent.absolute() / 'workspace/static/index.html'
    url_html_path = f'file://{url_html_fn}'
    f = Flow().load_config('flows/query.yml')
    with f:
        try:
            # new=2 asks the browser to open the page in a new tab.
            webbrowser.open(url_html_path, new=2)
        except:
            # Best effort: failing to open a browser still leaves the Flow serving.
            pass
        finally:
            print(f'You should see a demo page opened in your browser'
                  f'if not, you may open {url_html_path} manually')
            f.block()
def query_cli():
    """Interactive terminal loop: send each question to the query Flow and print matches."""
    def print_topk(resp):
        # Callback for each response: print the raw doc, then its ranked matches.
        for doc in resp.docs:
            print(doc)
            # Re-wrap to get a full Document object from the response item.
            doc = Document(doc)
            print(f'🤖 Answers:')
            for m in doc.matches:
                print(f'\t{m.tags["title"]}')
                print(f'\t{m.tags["para"]}')
                print(f'-----')
    f = Flow().load_config('flows/query.yml')
    with f:
        f.protocol = 'grpc'
        print(f'🤖 Hi there, please ask me questions related to the indexed Documents.\n'
              'For example, "Who is Paula Deen\'s brother?"\n')
        while True:
            text = input('Question: (type `\q` to quit)')
            # `\q` or an empty line ends the session.
            if text == '\q' or not text:
                return
            f.post(on='/search', inputs=[Document(content=text), ], on_done=print_topk)
def main(task):
    """Dispatch the CLI sub-command.

    :param task: one of ``index``, ``query``, ``query_cli``
        (normally ``sys.argv[1]``).
    """
    config()
    if task == 'index':
        if Path('./workspace').exists():
            # Warn only; indexing on top of an existing workspace is still attempted.
            print('./workspace exists, please delete it if you want to reindex')
        data_fn = sys.argv[2] if len(sys.argv) >= 3 else 'toy_data/test.tsv'
        print(f'indexing {data_fn}')
        index(data_fn)
    elif task == 'query':
        query()
    elif task == 'query_cli':
        query_cli()
    else:
        # Previously an unknown task was silently ignored; report it instead.
        print(f'unknown task: {task}. Choose from index, query, query_cli')
if __name__ == '__main__':
    # Require the sub-command argument instead of dying with an IndexError.
    if len(sys.argv) < 2:
        print('USAGE: python3 app.py {index|query|query_cli} [data_file]',
              file=sys.stderr)
        sys.exit(1)
    main(sys.argv[1])
| 3,206 | 29.542857 | 94 | py |
RocketQA | RocketQA-main/examples/jina_example/rocketqa_encoder/executor.py | import numpy as np
from jina import Executor, requests
import rocketqa
class RocketQADualEncoder(Executor):
    """
    Calculate the `embedding` of the passages and questions with RocketQA Dual-Encoder models.
    """
    def __init__(self, model, use_cuda=False, device_id=0, batch_size=1, *args, **kwargs):
        """
        :param model: A model name returned by `rocketqa.available_models()` or the path of a
            user-specified checkpoint config.
        :param use_cuda: Set to `True` (default: `False`) to use GPU.
        :param device_id: The GPU device id to load the model. Set to integers starting from 0
            to `N`, where `N` is the number of GPUs minus 1.
        :param batch_size: the batch size during inference.
        """
        super().__init__(*args, **kwargs)
        self.encoder = rocketqa.load_model(model=model, use_cuda=use_cuda, device_id=device_id, batch_size=batch_size)
        self.b_s = batch_size
    @requests(on='/index')
    def encode_passage(self, docs, **kwargs):
        # Embed root-level Documents that carry both a title and a para tag,
        # processing them batch-by-batch.
        batch_generator = (
            docs.traverse_flat(
                traversal_paths='r',
                filter_fn=lambda d: d.tags.get('title', None) is not None and d.tags.get('para', None) is not None)
            .batch(batch_size=self.b_s))
        for batch in batch_generator:
            titles, paras = batch.get_attributes('tags__title', 'tags__para')
            para_embs = self.encoder.encode_para(para=paras, title=titles)
            for doc, emb in zip(batch, para_embs):
                doc.embedding = emb.squeeze()
    @requests(on='/search')
    def encode_question(self, docs, **kwargs):
        # Questions are embedded one at a time from each Document's text.
        for doc in docs:
            query_emb = self.encoder.encode_query(query=[doc.text])
            # encode_query returns a generator; materialize it into an array.
            query_emb = np.array(list(query_emb))
            doc.embedding = query_emb.squeeze()
| 1,817 | 42.285714 | 118 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.