text
stringlengths 1
93.6k
|
|---|
"""Initialize transformations given the dataset name and configurations.
|
Args:
|
config_tr: A dictionary of transformation parameters.
|
Returns a composition of transformations.
|
"""
|
config_tr = clean_config(config_tr)
|
if "timm_resize_crop_norm" in config_tr:
|
config_tr = timm_resize_crop_norm(config_tr)
|
transforms = []
|
for t_name, t_class in TRANSFORMATION_TO_NAME.items():
|
if t_name in config_tr:
|
# TODO: warn for every key in config_tr that was not used
|
transforms += [t_class(**config_tr[t_name])]
|
return T.Compose(transforms)
|
class MixUp(torch.nn.Module):
    r"""MixUp image transformation.

    For an input x the output is :math:`\lambda x + (1-\lambda) x_p`, where
    :math:`x_p` is a random permutation of `x` along the batch dimension, and
    :math:`\lambda` (``lam``) is a random number between 0 and 1.

    See https://arxiv.org/abs/1710.09412 for more details.
    """

    def __init__(
        self, alpha: float = 1.0, p: float = 1.0, div_by: float = 1.0, *args, **kwargs
    ) -> None:
        """Initialize MixUp transformation.

        Args:
            alpha: A positive real number that determines the sampling
                distribution. Each mixed sample is a convex combination of two
                examples from the batch with mixing coefficient lambda.
                lambda is sampled from a symmetric Beta distribution with
                parameter alpha. When alpha=0 no mixing happens. Defaults to 1.0.
            p: Mixing is applied with probability `p`. Defaults to 1.0.
            div_by: Divide the lambda by a constant. Set to 2.0 to make sure mixing is
                biased towards the first input. Defaults to 1.0.
        """
        super().__init__(*args, **kwargs)
        assert alpha >= 0
        assert p >= 0 and p <= 1.0
        assert div_by >= 1.0
        self.alpha = alpha
        self.p = p
        self.div_by = div_by

    def get_params(self, alpha: float, div_by: float) -> Optional[float]:
        """Return the MixUp mixing coefficient, or None when mixing is skipped.

        Mixing is skipped when ``alpha == 0`` or, randomly, with probability
        ``1 - self.p``.
        """
        # Skip mixing by probability 1-self.p (alpha == 0 disables mixing entirely).
        if alpha == 0 or torch.rand(1) > self.p:
            return None
        # Symmetric Beta draw; div_by > 1 biases the mix towards the first input.
        lam = np.random.beta(alpha, alpha) / div_by
        return lam

    def forward(
        self,
        x: Tensor,
        x2: Optional[Tensor] = None,
        y: Optional[Tensor] = None,
        y2: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        r"""Apply pixel-space mixing to a batch of examples.

        Args:
            x: A tensor with a batch of samples. Shape: [batch_size, ...].
            x2: A tensor with exactly one matching sample for any input in `x`.
                Shape: [batch_size, ...]. If None, pairs are drawn by randomly
                permuting `x` along the batch dimension.
            y: A tensor of target labels. Shape: [batch_size, ...].
            y2: A tensor of target labels for paired samples. Shape:
                [batch_size, ...]. Ignored (recomputed from the permutation)
                when `x2` is None.

        Returns:
            Tuple of (mixed x, mixed y). When mixing is skipped (see
            `get_params`), the inputs `x` and `y` are returned unchanged;
            `mixed y` is None whenever `y` is None.
        """
        # Randomly sample lambda; None means "no mixing this batch".
        lam = self.get_params(self.alpha, self.div_by)
        if lam is None:
            return x, y

        # Randomly sample the second input from the same mini-batch if not provided.
        if x2 is None:
            batch_size = int(x.size()[0])
            index = torch.randperm(batch_size, device=x.device)
            # Plain advanced indexing (no trailing ":") so 1-D inputs such as
            # class-label vectors are supported; identical result for rank >= 2.
            x2 = x[index]
            y2 = y[index] if y is not None else None

        # Mix inputs and labels with the same coefficient.
        mixed_x = lam * x + (1 - lam) * x2
        mixed_y = lam * y + (1 - lam) * y2 if y is not None else None
        return mixed_x, mixed_y
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.