# File: DA-Transformer-main/fairseq/modules/quantization/pq/pq.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .em import EM, EmptyClusterResolveError
class PQ(EM):
"""
Quantizes the layer weights W with the standard Product Quantization
technique. This learns a codebook of codewords or centroids of size
block_size from W. For further reference on using PQ to quantize
neural networks, see "And the Bit Goes Down: Revisiting the Quantization
of Neural Networks", Stock et al., ICLR 2020.
PQ is performed in two steps:
(1) The matrix W (weights or fully-connected or convolutional layer)
is reshaped to (block_size, -1).
- If W is fully-connected (2D), its columns are split into
blocks of size block_size.
- If W is convolutional (4D), its filters are split along the
spatial dimension.
(2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix.
Args:
        - W: weight matrix to quantize of size (out_features x in_features)
- block_size: size of the blocks (subvectors)
- n_centroids: number of centroids
- n_iter: number of k-means iterations
- eps: for cluster reassignment when an empty cluster is found
        - max_tentatives: for cluster reassignment when an empty cluster is found
- verbose: print information after each iteration
Remarks:
        - block_size must be compatible with the shape of W
"""
def __init__(
self,
W,
block_size,
n_centroids=256,
n_iter=20,
eps=1e-6,
max_tentatives=30,
verbose=True,
):
self.block_size = block_size
W_reshaped = self._reshape(W)
super(PQ, self).__init__(
W_reshaped,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
def _reshape(self, W):
"""
        Reshapes the matrix W as explained in step (1).
"""
# fully connected: by convention the weight has size out_features x in_features
if len(W.size()) == 2:
self.out_features, self.in_features = W.size()
assert (
self.in_features % self.block_size == 0
), "Linear: n_blocks must be a multiple of in_features"
return (
W.reshape(self.out_features, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# convolutional: we reshape along the spatial dimension
elif len(W.size()) == 4:
self.out_channels, self.in_channels, self.k_h, self.k_w = W.size()
assert (
self.in_channels * self.k_h * self.k_w
) % self.block_size == 0, (
"Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w"
)
return (
W.reshape(self.out_channels, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# not implemented
else:
raise NotImplementedError(W.size())
def encode(self):
"""
Performs self.n_iter EM steps.
"""
self.initialize_centroids()
for i in range(self.n_iter):
try:
self.step(i)
except EmptyClusterResolveError:
break
def decode(self):
"""
Returns the encoded full weight matrix. Must be called after
the encode function.
"""
# fully connected case
if "k_h" not in self.__dict__:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
# convolutional case
else:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(self.out_channels, self.in_channels, self.k_h, self.k_w)
)
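# Illustrative usage sketch (added for clarity, not part of the original file).
# A minimal, hedged example of driving PQ on a fully-connected weight; the
# shapes are assumptions chosen so that in_features is a multiple of block_size.
#
#   import torch
#   W = torch.randn(512, 1024)                   # out_features x in_features
#   quantizer = PQ(W, block_size=8, n_centroids=256, n_iter=20)
#   quantizer.encode()                           # EM / k-means on the reshaped blocks
#   W_hat = quantizer.decode()                   # reconstructed weight, same shape as W
#   assert W_hat.shape == W.shape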
# File: DA-Transformer-main/fairseq/modules/quantization/pq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import torch
import numpy as np
import torch.distributed as dist
import torch.nn as nn
from .modules import PQConv2d, PQEmbedding, PQLinear
from .pq import PQ
def quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=0,
n_iter=15,
eps=1e-6,
max_tentatives=100,
remove_weights=False,
verbose=True,
state_dict=None,
):
"""
Quantize a model in-place by stages. All the targeted
layers are replaced by their quantized counterpart,
and the model is ready for the finetuning of the
centroids in a standard training loop (no modifications
required). Note that we do not quantize biases.
Args:
- model: a nn.Module
        - size_tracker: useful for tracking quantization statistics
- layers_to_quantize: a list containing regexps for
filtering the layers to quantize at each stage according
to their name (as in model.named_parameters())
- block_sizes_config: dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
- n_centroids_config: dict like
{
'Conv2d': ('kernel_size', {'*': 256}),
'Linear': ('in_features', {'*': 256})
}
For instance, all conv2d layers are quantized with 256 centroids
- step: the layers to quantize inplace corresponding
to layers_to_quantize[step]
"""
quantized_layers = get_layers(
model, layers_to_quantize[step], remove_weights=remove_weights
)
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
verbose = verbose and is_master_process
# get block size and centroids
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(
f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids"
)
# quantize layer
weight = module.weight.data.clone()
is_bias = "bias" in [x[0] for x in module.named_parameters()]
bias = module.bias.data.clone() if is_bias else None
quantizer = PQ(
weight,
block_size,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
# quantization performed on all GPUs with same seed
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
# If n_iter = 0 and state_dict is provided, then
# we initialize random assignments and centroids to
# random values of the appropriate dimensions
# because the quantized model parameters will
        # be overwritten by the state_dict later on.
if n_iter == 0 and state_dict:
# Initialize random centroids of the correct size
centroids = torch.rand(centroids.size())
centroids.cuda()
# Get counts and assignment keys from layer in loaded checkpoint.
counts_key = layer + "." + "counts"
assignment_key = layer + "." + "assignments"
# Get number of different bins to include.
counts = list(state_dict[counts_key].shape)[0]
print(layer)
print(state_dict[counts_key])
print(counts)
# Initialize random assignments of the correct size
# with an appropriate number of bins.
num_assignments = list(state_dict[assignment_key].shape)[0]
num_extra = num_assignments - counts
print(num_assignments)
print(num_extra)
assignments_bins = torch.arange(counts)
assignments_rand = torch.randint(0, counts - 1, (num_extra,))
assignments = torch.cat((assignments_bins, assignments_rand), 0)
# assignments = assignments.type(torch.IntTensor)
assignments.cuda()
print("assignments")
print(assignments)
# broadcast results to make sure weights are up-to-date
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
# instantiate the quantized counterpart
if isinstance(module, nn.Linear):
out_features, in_features = map(
lambda k: module.__dict__[k], ["out_features", "in_features"]
)
quantized_module = PQLinear(
centroids, assignments, bias, in_features, out_features
)
elif isinstance(module, nn.Embedding):
num_embeddings, embedding_dim = map(
lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
)
quantized_module = PQEmbedding(
centroids, assignments, num_embeddings, embedding_dim
)
elif isinstance(module, nn.Conv2d):
out_channels, in_channels, kernel_size = map(
lambda k: module.__dict__[k],
["out_channels", "in_channels", "kernel_size"],
)
stride, padding, dilation, groups, padding_mode = map(
lambda k: module.__dict__[k],
["stride", "padding", "dilation", "groups", "padding_mode"],
)
quantized_module = PQConv2d(
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
)
else:
raise ValueError(f"Module {module} not yet supported for quantization")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# update statistics
size_tracker.update(weight, block_size, n_centroids)
# return name of quantized layers
return quantized_layers
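# Illustrative configuration sketch (added for clarity, not part of the original
# file). The regexp and sizes below are assumptions showing the shape of the
# config dicts that the docstring above describes.
#
#   layers_to_quantize = ["decoder\\.layers\\.\\d+\\.fc[12]"]      # hypothetical regexp
#   block_sizes_config = {"Linear": ("in_features", {"*": 8})}
#   n_centroids_config = {"Linear": ("in_features", {"*": 256})}
#   size_tracker = SizeTracker(model)
#   quantized = quantize_model_(
#       model, size_tracker, layers_to_quantize,
#       block_sizes_config, n_centroids_config, step=0,
#   )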
def get_layers(model, filter_regexp, remove_weights=False):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
            down_layers\\.[123456]\\.(conv[12]|identity\\.conv)
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
    # remove .weight in all other names (or .weight_orig if spectral norm is used)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
# remove weights indicates whether the weights extension should be removed, in addition to
# weight_orig and weight extension on names
if remove_weights:
all_layers = map(lambda x: x.replace(".weights", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers))
def get_param(module, layer_name, param_config):
"""
Given a quantization configuration, get the right parameter
for the module to be quantized.
Args:
- module: a nn.Module
- layer_name: the name of the layer
- param_config: a dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
Remarks:
- if 'fuzzy_name' is passed as a parameter, layers whose layer_name
include 'fuzzy_name' will be assigned the given parameter.
In the following example, conv.expand layers will have a block
size of 9 while conv.reduce will have a block size of 4 and all
other layers will have a block size of 2.
{
'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
}
"""
layer_type = module.__class__.__name__
if layer_type not in param_config:
raise KeyError(f"Layer type {layer_type} not in config for layer {module}")
feature, params = param_config[module.__class__.__name__]
if feature != "fuzzy_name":
feature_value = str(getattr(module, feature))
if feature_value not in params:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"{feature}={feature_value} not in config for layer {module}"
)
else:
feature_values = [name for name in params if name in layer_name]
if len(feature_values) == 0:
if "*" in params:
feature_value = "*"
else:
raise KeyError(f"name={layer_name} not in config for {module}")
else:
feature_value = feature_values[0]
return params[feature_value]
class SizeTracker(object):
"""
Class to keep track of the compressed network size with iPQ.
Args:
- model: a nn.Module
Remarks:
- The compressed size is the sum of three components
for each layer in the network:
(1) Storing the centroids given by iPQ in fp16
(2) Storing the assignments of the blocks in int8
(3) Storing all non-compressed elements such as biases
        - This cost is only valid if we use 256 centroids (then
          indexing can indeed be done with int8).
"""
def __init__(self, model):
self.model = model
self.size_non_compressed_model = self.compute_size()
self.size_non_quantized = self.size_non_compressed_model
self.size_index = 0
self.size_centroids = 0
self.n_quantized_layers = 0
def compute_size(self):
"""
Computes the size of the model (in MB).
"""
res = 0
for _, p in self.model.named_parameters():
res += p.numel()
return res * 4 / 1024 / 1024
def update(self, W, block_size, n_centroids):
"""
Updates the running statistics when quantizing a new layer.
"""
# bits per weights
bits_per_weight = np.log2(n_centroids) / block_size
self.n_quantized_layers += 1
# size of indexing the subvectors of size block_size (in MB)
size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
self.size_index += size_index_layer
# size of the centroids stored in float16 (in MB)
size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
self.size_centroids += size_centroids_layer
# size of non-compressed layers, e.g. LayerNorms or biases (in MB)
size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
self.size_non_quantized -= size_uncompressed_layer
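    # Worked example (added for clarity, not part of the original file): with
    # n_centroids=256 and block_size=8, bits_per_weight = log2(256) / 8 = 1 bit,
    # so a layer with 1M weights costs about 1e6 / 8 / 1024 / 1024 ~ 0.12 MB of
    # assignments plus 256 * 8 * 2 / 1024 / 1024 ~ 0.004 MB of fp16 centroids,
    # versus 1e6 * 4 / 1024 / 1024 ~ 3.8 MB uncompressed.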
def __repr__(self):
size_compressed = (
self.size_index + self.size_centroids + self.size_non_quantized
)
compression_ratio = self.size_non_compressed_model / size_compressed # NOQA
return (
f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
f"After quantizing {self.n_quantized_layers} layers, size "
f"(indexing + centroids + other): {self.size_index:.2f} MB + "
f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
)
def attrsetter(*items):
def resolve_attr(obj, attr):
attrs = attr.split(".")
head = attrs[:-1]
tail = attrs[-1]
for name in head:
obj = getattr(obj, name)
return obj, tail
def g(obj, val):
for attr in items:
resolved_obj, resolved_attr = resolve_attr(obj, attr)
setattr(resolved_obj, resolved_attr, val)
return g
# File: DA-Transformer-main/fairseq/modules/quantization/pq/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qemb import PQEmbedding # NOQA
from .qlinear import PQLinear # NOQA
# File: DA-Transformer-main/fairseq/modules/quantization/pq/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class PQConv2d(nn.Module):
"""
Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass and autograd automatically computes the gradients with respect to the
centroids.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_channels x n_blocks
- bias: the non-quantized bias, must be either torch.Tensor or None
Remarks:
- We refer the reader to the official documentation of the nn.Conv2d module
for the other arguments and the behavior of the module.
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Conv2d module for a standard training loop.
- During the backward, the gradients are averaged by cluster and not summed.
This explains the hook registered to the centroids.
"""
def __init__(
self,
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode="zeros",
):
super(PQConv2d, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.padding_mode = padding_mode
# check compatibility
if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % out_channels != 0:
raise ValueError("Wrong PQ sizes")
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
# register hook for averaging gradients per centroids instead of summing
self.centroids.register_hook(lambda x: x / self.counts[:, None])
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(
self.out_channels, self.in_channels // self.groups, *self.kernel_size
)
)
def forward(self, x):
return F.conv2d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def extra_repr(self):
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
if self.padding_mode != "zeros":
s += ", padding_mode={padding_mode}"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
# File: DA-Transformer-main/fairseq/modules/quantization/pq/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQEmbedding(nn.Module):
"""
Quantized counterpart of nn.Embedding module. Stores the centroids and
the assignments. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Embedding module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Embedding module for a standard training loop.
"""
def __init__(
self,
centroids,
assignments,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
):
super(PQEmbedding, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
# check compatibility
if self.embedding_dim % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.num_embeddings != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.num_embeddings, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, input):
return F.embedding(
input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
# File: DA-Transformer-main/fairseq/modules/quantization/pq/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQLinear(nn.Module):
"""
Quantized counterpart of nn.Linear module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Linear module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 15% slower than
the non-quantized nn.Linear module for a standard training loop.
"""
def __init__(self, centroids, assignments, bias, in_features, out_features):
super(PQLinear, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_features = in_features
self.out_features = out_features
# check compatibility
if self.in_features % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.out_features != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, x):
return F.linear(
x,
self.weight,
self.bias,
)
def extra_repr(self):
return f"in_features={self.in_features},\
out_features={self.out_features},\
n_centroids={self.n_centroids},\
block_size={self.block_size},\
bias={self.bias is not None}"
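# Illustrative wiring sketch (added for clarity, not part of the original file).
# A hedged example of plugging the centroids/assignments produced by the PQ
# quantizer into PQLinear; the layer sizes are assumptions for illustration.
#
#   import torch.nn as nn
#   from ..pq import PQ                          # import path assumed for this sketch
#   linear = nn.Linear(1024, 512)
#   quantizer = PQ(linear.weight.data, block_size=8, n_centroids=256)
#   quantizer.encode()
#   q_linear = PQLinear(
#       quantizer.centroids.contiguous(),        # n_centroids x block_size
#       quantizer.assignments.contiguous(),      # out_features * n_blocks entries
#       linear.bias.data, in_features=1024, out_features=512,
#   )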
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import quantize_model_ # NOQA
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/ops.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
try:
import torch.ao.quantization as quantization
except ImportError:
import torch.quantization as quantization
def emulate_int(w, bits, method, scale=None, zero_point=None):
q = globals()[f"emulate_int8_{method}"]
return q(w, scale=scale, zero_point=zero_point, bits=bits)
def quantize(w, scale, zero_point, bits=8):
# In the default behavior, max_val = 255.
max_val = 2**bits - 1
return (
torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point
) * scale
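# Worked example (added for clarity, not part of the original file): with bits=8,
# scale=0.1 and zero_point=0, a value w = 0.26 becomes
# clamp(round(0.26 / 0.1 + 0), 0, 255) = 3, which maps back to (3 - 0) * 0.1 = 0.3,
# i.e. the nearest point of the emulated int8 grid.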
def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = quantization.observer.HistogramObserver()
obs.to(device=w.device)
_ = obs(w.float())
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
def emulate_int8_channel(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = quantization.observer.PerChannelMinMaxObserver(
ch_axis=-1, qscheme=torch.per_channel_symmetric
)
obs.to(device=w.device)
_ = obs(w)
scale, zero_point, ch_axis = obs.get_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8):
if scale is None:
obs = quantization.observer.MinMaxObserver()
obs.to(device=w.device)
_ = obs(w)
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point, bits=bits), scale, zero_point
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from operator import attrgetter
import torch.distributed as dist
import torch.nn as nn
from ..pq.utils import attrsetter, get_layers
from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear
MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}
def quantize_model_(
model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False
):
"""
Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.
Args:
- model: a nn.Module
- p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
- bits: number of bits
- update_step: update quantization parameters every update_step steps
"""
# quantize all layers
# remove weights indicates whether the weights extension should be removed, in addition to
# weight_orig and weight extension on names
quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights)
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
# recover module
module = attrgetter(layer)(model)
if is_master_process:
logging.info(
f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
)
# quantization params
q_params = {
"p": p,
"update_step": update_step,
"bits": bits,
"method": method,
"counter": 0,
}
# instantiate the quantized counterpart
if isinstance(module, tuple(MAPPING.keys())):
QuantizedModule = MAPPING[module.__class__]
quantized_module = QuantizedModule.__new__(QuantizedModule)
params = module.__dict__
params.update(q_params)
quantized_module.__dict__.update(params)
else:
if is_master_process:
logging.info(f"Module {module} not yet supported for quantization")
continue
# activation quantization
a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method)
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# return name of quantized layers
return quantized_layers
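# Illustrative usage sketch (added for clarity, not part of the original file).
# A hedged example of scalar-quantizing a model with Quant-Noise; the
# hyper-parameters are assumptions for illustration.
#
#   layers = quantize_model_(model, p=0.1, bits=8, update_step=1000, method="histogram")
#   # every supported Linear/Embedding/Conv2d is replaced by its Int* counterpart,
#   # and a forward hook is registered to fake-quantize its post-activations.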
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qact import ActivationQuantizer # NOQA
from .qconv import IntConv2d # NOQA
from .qemb import IntEmbedding # NOQA
from .qlinear import IntLinear # NOQA
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/modules/qact.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..ops import emulate_int
class ActivationQuantizer:
"""
Fake scalar quantization of the activations using a forward hook.
Args:
        - module: a nn.Module for which we quantize the *post-activations*
- p: proportion of activations to quantize, set by default to 1
- update_step: to recompute quantization parameters
- bits: number of bits for quantization
- method: choose among {"tensor", "histogram", "channel"}
- clamp_threshold: to prevent gradients overflow
Remarks:
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- For the list of quantization methods and number of bits, see ops.py
- To remove the hook from the module, simply call self.handle.remove()
- At test time, the activations are fully quantized
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
to prevent overflow during the backward pass
"""
def __init__(
self,
module,
p=1,
update_step=1000,
bits=8,
method="histogram",
clamp_threshold=5,
):
self.module = module
self.p = p
self.update_step = update_step
self.counter = 0
self.bits = bits
self.method = method
self.clamp_threshold = clamp_threshold
self.handle = None
self.register_hook()
def register_hook(self):
# forward hook
def quantize_hook(module, x, y):
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.module.training else 1
# quantize activations
y_q, self.scale, self.zero_point = emulate_int(
y.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(y)
mask.bernoulli_(1 - p)
noise = (y_q - y).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2**self.bits - 1 - self.zero_point)
return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()
# register hook
self.handle = self.module.register_forward_hook(quantize_hook)
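# Illustrative usage sketch (added for clarity, not part of the original file).
# The quantizer attaches purely through a forward hook, so a hedged example
# (assuming torch and torch.nn are imported as usual) looks like:
#
#   layer = nn.Linear(16, 16)
#   quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
#   y = layer(torch.randn(4, 16))                # outputs partially fake-quantized
#   quantizer.handle.remove()                    # detach the hook when done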
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from ..ops import emulate_int
class IntConv2d(_ConvNd):
"""
Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.
Args:
- standard nn.Conv2d parameters
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
        - We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
p=0,
bits=8,
method="histogram",
update_step=1000,
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(IntConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_pair(0),
groups,
bias,
padding_mode,
)
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def _conv_forward(self, input, weight):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
self.bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input,
weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # update parameters every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2**self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = self._conv_forward(input, weight)
return output
def extra_repr(self):
return (
"in_channels={}, out_channels={}, kernel_size={}, stride={}, "
"padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
"bits={}, method={}".format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.p,
self.bits,
self.method,
)
)
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntEmbedding(nn.Module):
"""
Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
Args:
- num_embeddings: number of tokens
- embedding_dim: embedding dimension
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
p=0,
update_step=1000,
bits=8,
method="histogram",
):
super(IntEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert (
padding_idx < self.num_embeddings
), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert (
padding_idx >= -self.num_embeddings
), "Padding_idx must be within num_embeddings"
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [
num_embeddings,
embedding_dim,
], "Shape of weight does not match num_embeddings and embedding_dim"
self.weight = nn.Parameter(_weight)
self.sparse = sparse
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.normal_(self.weight)
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2**self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
# return output
output = F.embedding(
input,
weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return output
def extra_repr(self):
s = "{num_embeddings}, {embedding_dim}"
if self.padding_idx is not None:
s += ", padding_idx={padding_idx}"
if self.max_norm is not None:
s += ", max_norm={max_norm}"
if self.norm_type != 2:
s += ", norm_type={norm_type}"
if self.scale_grad_by_freq is not False:
s += ", scale_grad_by_freq={scale_grad_by_freq}"
if self.sparse is not False:
s += ", sparse=True"
s += "quant_noise={p}, bits={bits}, method={method}"
return s.format(**self.__dict__)
# File: DA-Transformer-main/fairseq/modules/quantization/scalar/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntLinear(nn.Module):
"""
Quantized counterpart of the nn.Linear module that applies QuantNoise during training.
Args:
- in_features: input features
- out_features: output features
- bias: bias or not
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick.
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_features,
out_features,
bias=True,
p=0,
update_step=3000,
bits=8,
method="histogram",
):
super(IntLinear, self).__init__()
self.in_features = int(in_features)
self.out_features = int(out_features)
self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.chosen_bias = bias
if self.chosen_bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.chosen_bias:
nn.init.constant_(self.bias, 0.0)
return
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # update parameters every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2**self.bits - 1 - self.zero_point)
weight = (
torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
+ noise.detach()
)
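        # Note (added for clarity): on the fraction p of entries where the noise
        # survives the mask, clamp(w) + (w_q - w) equals the quantized value w_q
        # (for weights inside the clamp range), while the remaining entries stay
        # in full precision; since the noise term is detached, gradients flow
        # only through the clamp term, i.e. straight through to self.weight.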
# return output
output = F.linear(input, weight, self.bias)
return output
def extra_repr(self):
return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
self.in_features,
self.out_features,
self.bias is not None,
self.p,
self.bits,
self.method,
)
# File: DA-Transformer-main/fairseq/optim/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.optim.bmuf import FairseqBMUF # noqa
from fairseq.optim.fairseq_optimizer import ( # noqa
FairseqOptimizer,
LegacyFairseqOptimizer,
)
from fairseq.optim.amp_optimizer import AMPOptimizer
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from fairseq.optim.shard import shard_
from omegaconf import DictConfig
__all__ = [
"AMPOptimizer",
"FairseqOptimizer",
"FP16Optimizer",
"MemoryEfficientFP16Optimizer",
"shard_",
]
(
_build_optimizer,
register_optimizer,
OPTIMIZER_REGISTRY,
OPTIMIZER_DATACLASS_REGISTRY,
) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True)
def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
if all(isinstance(p, dict) for p in params):
params = [t for p in params for t in p.values()]
params = list(filter(lambda p: p.requires_grad, params))
return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)
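# Illustrative usage sketch (added for clarity, not part of the original file).
# A hedged example of building an optimizer from a config node; the attribute
# path cfg.optimizer is an assumption for illustration.
#
#   optimizer = build_optimizer(cfg.optimizer, model.parameters())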
# automatically import any Python files in the optim/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.optim." + file_name)
# File: DA-Transformer-main/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adadelta")
class Adadelta(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO',
help='coefficient used for computing a running average of squared gradients')
parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS',
help='term added to the denominator to improve numerical stability')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"rho": self.args.adadelta_rho,
"eps": self.args.adadelta_eps,
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return True
# File: DA-Transformer-main/fairseq/optim/adafactor.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adafactor")
class FairseqAdafactor(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adafactor(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E",
help='epsilons for Adafactor optimizer')
parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C",
help='threshold for clipping update root mean square')
parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D",
help='decay rate of the second moment estimator')
parser.add_argument('--beta1', type=float, default=None, metavar="B",
help='beta for first moment estimator. Optional')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--scale-parameter', action='store_true',
help='scale learning rate by root mean square of parameter')
parser.add_argument('--relative-step', action='store_true',
                            help='set learning rate to inverse square root of timestep, '
                                 'otherwise use external learning rate')
parser.add_argument('--warmup-init', action='store_true',
help='use relative step for warm-up learning rate schedule')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
Note : Convergence issues empirically observed with fp16 on.
Might require search for appropriate configuration.
"""
return {
"lr": self.args.lr[0],
"eps": eval(self.args.adafactor_eps),
"clip_threshold": self.args.clip_threshold,
"decay_rate": self.args.decay_rate,
"beta1": self.args.beta1,
"weight_decay": self.args.weight_decay,
"scale_parameter": self.args.scale_parameter, # defaults to False
"relative_step": self.args.relative_step, # defaults to False
"warmup_init": self.args.warmup_init,
}
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on:
`Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate
depending on the *scale_parameter*, *relative_step* and
*warmup_init* options. To use a manual (external) learning rate
schedule you should set `scale_parameter=False` and
`relative_step=False`.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): external learning rate (default: None)
        eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of
final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square
gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient
(default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of
parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(
self,
params,
lr=None,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
scale_parameter=True,
relative_step=True,
warmup_init=False,
):
if lr is not None and relative_step:
raise ValueError("Cannot combine manual lr and relative_step options")
if warmup_init and not relative_step:
raise ValueError("warmup_init requires relative_step=True")
defaults = dict(
lr=lr,
eps=eps,
clip_threshold=clip_threshold,
decay_rate=decay_rate,
beta1=beta1,
weight_decay=weight_decay,
scale_parameter=scale_parameter,
relative_step=relative_step,
warmup_init=warmup_init,
)
super(Adafactor, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return False
def _get_lr(self, param_group, param_state):
rel_step_sz = param_group["lr"]
if param_group["relative_step"]:
min_step = (
1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
)
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
param_scale = 1.0
if param_group["scale_parameter"]:
param_scale = max(param_group["eps"][1], param_state["RMS"])
return param_scale * rel_step_sz
def _get_options(self, param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group["beta1"] is not None
return factored, use_first_moment
def _rms(self, tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (
(exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))
.rsqrt_()
.unsqueeze(-1)
)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
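    # Note (added for clarity): the two factors above implement Adafactor's
    # rank-1 reconstruction of the second-moment matrix, i.e. V[i, j] is
    # approximated by R[i] * C[j] / mean(R), where R and C are the running
    # row/column means stored in exp_avg_sq_row / exp_avg_sq_col, so the
    # method returns an elementwise approximation of 1 / sqrt(V).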
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state["step"] = 0
if use_first_moment:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:]
).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
state["RMS"] = 0
else:
if use_first_moment:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
state["RMS"] = self._rms(p_data_fp32)
group["lr"] = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
update = (grad**2) + group["eps"][0]
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(
update.mean(dim=-1), alpha=1.0 - beta2t
)
exp_avg_sq_col.mul_(beta2t).add_(
update.mean(dim=-2), alpha=1.0 - beta2t
)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_(
(self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)
)
update.mul_(group["lr"])
if use_first_moment:
exp_avg = state["exp_avg"]
exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
update = exp_avg
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
# File: DA-Transformer-main/fairseq/optim/adagrad.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad")
class Adagrad(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return False
# File: DA-Transformer-main/fairseq/optim/adam.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import Any, List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from fairseq.optim.fused_adam import get_fused_adam_class
from omegaconf import II, OmegaConf
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdamConfig(FairseqDataclass):
adam_betas: Any = field(
default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
)
fp16_adam_stats: bool = field(
default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
)
# TODO common vars below in parent
tpu: bool = II("common.tpu")
lr: List[float] = II("optimization.lr")
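# Note (added for clarity, not part of the original file): when training is
# driven from the command line these fields typically map to flags such as
#   --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-6 --weight-decay 0.01
# the exact invocation depends on the entry point being used.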
@register_optimizer("adam", dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: FairseqAdamConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if getattr(cfg, "tpu", False):
if self.cfg.fp16_adam_stats:
raise NotImplementedError("--fp16-adam-stats is only supported on GPU")
# on TPUs we use the Adam defined here, since it
# automatically casts gradients to FP32
self._optimizer = Adam(params, **self.optimizer_config)
elif use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(
params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config
)
else:
if self.cfg.fp16_adam_stats:
raise NotImplementedError(
"--fp16-adam-stats is only supported with FusedAdamV1"
)
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas)
if isinstance(self.cfg.adam_betas, str)
else OmegaConf.to_container(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
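# Editor-added illustrative sketch (not part of the original fairseq file): the Adam class
# above is a standard torch.optim.Optimizer, so a minimal smoke test looks like this; all
# "_demo_*" names are hypothetical and exist only for the example.
if __name__ == "__main__":
    _demo_model = torch.nn.Linear(4, 2)
    _demo_opt = Adam(_demo_model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0.01)
    _demo_loss = _demo_model(torch.randn(8, 4)).pow(2).mean()
    _demo_loss.backward()
    _demo_opt.step()  # applies the decoupled (AdamW-style) weight decay described above
    _demo_opt.zero_grad()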
| 9,184 | 37.270833 | 100 | py |
null | DA-Transformer-main/fairseq/optim/adamax.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adamax")
class FairseqAdamax(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adamax(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--no-bias-correction', default=False, action='store_true',
help='disable bias correction')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"betas": eval(self.args.adamax_betas),
"eps": self.args.adamax_eps,
"weight_decay": self.args.weight_decay,
"bias_correction": not self.args.no_bias_correction,
}
class Adamax(torch.optim.Optimizer):
"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
It has been proposed in `Adam: A Method for Stochastic Optimization`__.
Compared to the version in PyTorch, this version implements a fix for weight decay.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
bias_correction (bool, optional): enable bias correction (default: True)
__ https://arxiv.org/abs/1412.6980
"""
def __init__(
self,
params,
lr=2e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
bias_correction=True,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
)
super(Adamax, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError("Adamax does not support sparse gradients")
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_inf"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_inf"] = state["exp_inf"].to(p_data_fp32)
exp_avg, exp_inf = state["exp_avg"], state["exp_inf"]
beta1, beta2 = group["betas"]
eps = group["eps"]
state["step"] += 1
# Update biased first moment estimate.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# Update the exponentially weighted infinity norm.
torch.max(
exp_inf.mul_(beta2),
grad.abs_(),
out=exp_inf,
)
step_size = group["lr"]
if group["bias_correction"]:
bias_correction = 1 - beta1 ** state["step"]
step_size /= bias_correction
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
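# Editor-added illustrative sketch (not part of the original fairseq file): Adamax is likewise
# a plain torch.optim.Optimizer; "_demo_*" names are hypothetical.
if __name__ == "__main__":
    _demo_net = torch.nn.Linear(3, 3)
    _demo_opt = Adamax(_demo_net.parameters(), lr=2e-3, betas=(0.9, 0.999))
    _demo_net(torch.ones(2, 3)).sum().backward()
    _demo_opt.step()  # update is scaled by exp_avg / (exp_inf + eps), as implemented above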
| 6,225 | 34.988439 | 92 | py |
null | DA-Transformer-main/fairseq/optim/amp_optimizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from fairseq import optim
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
class AMPOptimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support AMP (automatic mixed precision) training.
"""
def __init__(self, cfg: DictConfig, params, fp32_optimizer, **kwargs):
super().__init__(cfg.optimizer)
self.fp32_optimizer = fp32_optimizer
amp_kwargs = {"init_scale": cfg.common.fp16_init_scale}
if getattr(cfg.common, "amp_scale_window", None) is not None:
amp_kwargs["growth_interval"] = cfg.common.amp_init_scale
self._grad_scaler = torch.cuda.amp.GradScaler(**amp_kwargs)
self.min_loss_scale = cfg.common.min_loss_scale
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
cfg (omegaconf.DictConfig): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp32_optimizer = optim.build_optimizer(cfg.optimizer, params)
return cls(cfg, params, fp32_optimizer, **kwargs)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
self._grad_scaler.scale(loss).backward()
def step(self):
self.scaler.step(self.fp32_optimizer)
self.scaler.update()
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
self.scaler.unscale_(self.optimizer)
grad_norm = self.fp32_optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
if not torch.isfinite(grad_norm).all():
new_loss_scale = self.next_loss_scale
if new_loss_scale <= self.min_loss_scale:
raise FloatingPointError(
(
"AMP: Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try restarting training or use fp32. {}"
).format(self.min_loss_scale, new_loss_scale)
)
else:
logger.info(
"AMP: overflow detected, setting scale to " f"to {new_loss_scale}"
)
return grad_norm
@property
def scaler(self):
return self._grad_scaler
@property
def next_loss_scale(self):
return self.scaler.get_scale() * self.scaler.get_backoff_factor()
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
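# Editor-added sketch (not part of the original file): a training step drives this wrapper
# roughly in the following order; `cfg`, `model`, `loss` and `clip_norm` are hypothetical
# placeholders standing in for the trainer's own objects.
#
#     optimizer = AMPOptimizer.build_optimizer(cfg, model.parameters())
#     optimizer.backward(loss)                         # scales the loss through GradScaler
#     grad_norm = optimizer.clip_grad_norm(clip_norm)  # unscales grads; raises FloatingPointError
#                                                      # once the minimum loss scale is reached
#     optimizer.step()                                 # scaler.step(...) + scaler.update()
#     optimizer.zero_grad()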
| 3,536 | 32.056075 | 97 | py |
null | DA-Transformer-main/fairseq/optim/bmuf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
import torch.distributed as dist
from fairseq.dataclass.configs import FairseqBMUFConfig
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim.fairseq_optimizer import FairseqOptimizer
class FairseqBMUF(FairseqOptimizer):
"""
Implements incremental block distributed data parallelism similar to
https://ieeexplore.ieee.org/document/7472805
Paper title: Scalable training of deep learning machines by incremental
block training with intra-block parallel optimization and blockwise
model-update filtering
"""
def __init__(self, cfg: FairseqBMUFConfig, optimizer):
super().__init__(cfg)
self._optimizer = optimizer
self._num_updates = 0
self.sync_iter = cfg.global_sync_iter
self.block_momentum = cfg.block_momentum
self.block_lr = cfg.block_lr
self._reset_local_data()
self.warmup_iteration = cfg.warmup_iterations
self.use_nbm = cfg.use_nbm
self.initial_state = self._optimizer.state_dict()
self.average_sync = self.cfg.average_sync
self.world_size = self.cfg.distributed_world_size
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
gen_parser_from_dataclass(parser, FairseqBMUFConfig())
@property
def optimizer(self):
return self._optimizer.optimizer
@property
def optimizer_config(self):
return self._optimizer.optimizer_config
def get_lr(self):
return self._optimizer.get_lr()
def set_lr(self, lr):
self._optimizer.set_lr(lr)
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
self._optimizer.load_state_dict(state_dict, optimizer_overrides)
self.initial_state = self._optimizer.state_dict()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
def average_params(self):
self._optimizer.average_params()
def _block_sync(self):
if self.world_size <= 1:
return
# Update the global model using local models from all GPUs
# (Step-1) Calculate grad between previously synced model and
        # current local model
if self.block_momentum != 0:
self._calc_grad()
# (Step-2) Average gradient from all GPUs
self._avg_grad_from_all_gpus()
# (Step-3) Calculate global momentum and update the global model
if self.block_momentum != 0:
self._update_global_model()
# (Step-4) Average local optimizer params
if self.average_sync:
self.average_params()
def _is_warmup_end(self):
        # Check whether the number of training updates equals the warmup iteration count
if self.get_num_updates() == self.warmup_iteration:
return True
return False
def _is_bmuf_iter(self):
        # Check whether the current update count falls on a BMUF sync iteration (past warmup)
if (self.get_num_updates() > self.warmup_iteration) and (
self.get_num_updates() % self.sync_iter == 0
):
return True
return False
def _warmup_sync(self, root_rank=0):
if self.world_size <= 1:
return
# Broadcast the local model to all gpus
for param in self.params:
dist.broadcast(param.data, src=root_rank)
# Update local optimizer state
if self.average_sync:
self._optimizer.average_params()
else:
self._optimizer.load_state_dict(self.initial_state)
self._reset_local_data()
def step(self, closure=None):
"""Performs a single optimization step."""
self._optimizer.step(closure)
self.set_num_updates(self.get_num_updates() + 1)
if self._is_warmup_end():
self._warmup_sync()
elif self._is_bmuf_iter():
self._block_sync()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self._optimizer.zero_grad()
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
@torch.no_grad()
def _reset_local_data(self):
# (Step-0) Initialize global momentum parameters and store global copy on each gpu
self.global_params = [torch.zeros_like(p.data) for p in self.params]
self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
# saving the global model locally for calculating gradient during bmuf sync
for param, global_param in zip(self.params, self.global_params):
global_param.copy_(param.data)
@torch.no_grad()
def _calc_grad(self):
        # global_params is the global copy from the previously finished
        # synchronisation, while param.data is the local parameter after
        # block_sync_freq updates on this GPU, so grad is the difference
        # between the previously synced model and the current local model.
for index, (param, global_param) in enumerate(
zip(self.params, self.global_params)
):
self.grads[index] = global_param - param.data
def _avg_grad_from_all_gpus(self):
for index, param in enumerate(self.params):
sync_para = param.data if self.block_momentum == 0 else self.grads[index]
sync_para /= float(dist.get_world_size())
dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)
@torch.no_grad()
def _update_global_model(self):
for index, (param, global_param, smoothed_grad, grad) in enumerate(
zip(
self.params,
self.global_params,
self.smoothed_grads,
# all gpus would share the same value of smoothed_grad, since it is
# always computed on synchronized gradients.
self.grads,
)
):
            # global_param is the last synchronized parameter. Although
            # smoothed_grad is local, all processes hold the same value of
            # smoothed_grad, and hence param is a globally synchronized copy.
# smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t)
smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad
param.data.copy_(global_param - smoothed_grad)
# A Nesterov momentum here is to do a partial weight update before
# calculating the gradient
if self.use_nbm:
param.data.copy_(param.data - self.block_momentum * smoothed_grad)
# backup for the next synchronization.
self.smoothed_grads[index] = smoothed_grad
global_param.copy_(param.data)
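# Editor-added worked example (not part of the original file): with warmup_iterations=500 and
# global_sync_iter=50, step() broadcasts the rank-0 model at update 500 (_warmup_sync) and then
# runs _block_sync at updates 550, 600, 650, ...; each block sync averages the per-GPU deltas,
# applies block momentum, and optionally (use_nbm) a Nesterov-style partial update.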
| 7,449 | 36.064677 | 90 | py |
null | DA-Transformer-main/fairseq/optim/composite.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer, _build_optimizer
from fairseq.optim.lr_scheduler import FairseqLRScheduler, build_lr_scheduler
from omegaconf import II, open_dict
logger = logging.getLogger(__name__)
@dataclass
class OptimizerAndSchedulerConfig(FairseqDataclass):
optimizer: Any = None
lr_scheduler: Optional[Any] = None
lr: List = II("optimization.lr")
lr_float: Optional[
float
] = None # this makes it easier to sweep on learning rate with auto sweepers
@dataclass
class CompositeOptimizerConfig(FairseqDataclass):
groups: Dict[str, Any] = field(
default_factory=lambda: {},
metadata={
"help": "optimizer name -> optimizer OptimizerAndSchedulerConfig. "
"Configures a different optimizer and (optionally) lr scheduler for each parameter group"
},
)
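# Editor-added illustrative example (not part of the original file, all names hypothetical):
# parameters tagged with `p.param_group = "encoder"` in the model can get their own optimizer
# and schedule via a config fragment along these lines:
#
#     optimizer:
#       _name: composite
#       groups:
#         default:
#           lr_float: 5.0e-4
#           optimizer: {_name: adam, adam_betas: [0.9, 0.98]}
#           lr_scheduler: {_name: inverse_sqrt, warmup_updates: 4000}
#         encoder:
#           lr_float: 1.0e-4
#           optimizer: {_name: adam, adam_betas: [0.9, 0.98]}
#           lr_scheduler: {_name: inverse_sqrt, warmup_updates: 4000}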
@register_optimizer("composite", dataclass=CompositeOptimizerConfig)
class FairseqCompositeOptimizer(FairseqOptimizer):
optimizers: Dict[str, FairseqOptimizer] = {}
lr_schedulers: Dict[str, FairseqLRScheduler] = {}
lr_scheduler: FairseqLRScheduler = None
_optimizer: torch.optim.Optimizer
def __init__(self, cfg: CompositeOptimizerConfig, params):
super().__init__(cfg)
assert (
len(params) > 1
), "Composite optimizer only works when there are multiple parameter groups (try fp16_no_flatten_grads: true)"
groupped_params = defaultdict(list)
for p in params:
group = getattr(p, "param_group", "default")
groupped_params[group].append(p)
assert groupped_params.keys() == cfg.groups.keys(), (
f"Parameter groups {groupped_params.keys()} and optimizer groups {cfg.groups.keys()} are not the same! "
"Try setting 'param_group' on your parameters in the model."
)
for group, group_params in groupped_params.items():
group_cfg = cfg.groups[group]
with open_dict(group_cfg):
if group_cfg.lr_float is not None:
group_cfg.optimizer.lr = [group_cfg.lr_float]
group_cfg.lr_scheduler.lr = [group_cfg.lr_float]
else:
group_cfg.optimizer.lr = group_cfg.lr
group_cfg.lr_scheduler.lr = group_cfg.lr
self.optimizers[group] = _build_optimizer(group_cfg.optimizer, group_params)
if group_cfg.lr_scheduler is not None:
self.lr_schedulers[group] = build_lr_scheduler(
group_cfg.lr_scheduler, self.optimizers[group]
)
if len(self.lr_schedulers) > 0:
assert len(self.lr_schedulers) == len(self.optimizers), (
f"Please provide an lr scheduler for each optimizer to use pass_through scheduler. "
f"Optimizers: {self.optimizers}; Lr scheds: {self.lr_schedulers}"
)
self.lr_scheduler = CompositeLRScheduler(self.lr_schedulers)
self._optimizer = CompositeOptimizer(self.optimizers)
@property
def supports_groups(self):
return True
@property
def param_groups(self):
for opt in self.optimizers.values():
for group in opt.param_groups:
yield group
def get_lr(self):
"""Return the current learning rate."""
k = (
"default"
if "default" in self.optimizers
else next(iter(self.optimizers.keys()))
)
return self.optimizers[k].param_groups[0]["lr"]
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.optimizers.items()}
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
if k not in self.optimizers:
# skip extra keys like "loss_scale" added by fp16 optimizer
continue
overrides = (
optimizer_overrides[k]
if isinstance(optimizer_overrides, dict) and k in optimizer_overrides
else None
)
self.optimizers[k].load_state_dict(state, optimizer_overrides=overrides)
class CompositeOptimizer(torch.optim.Optimizer):
def __init__(self, optimizers: Dict[str, FairseqOptimizer]):
self.optimizers = optimizers
@property
def supports_memory_efficient_fp16(self):
return all(o.supports_memory_efficient_fp16 for o in self.optimizers.values())
@property
def supports_flat_params(self):
return all(o.supports_flat_params for o in self.optimizers.values())
def step(self, closure=None, groups=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for k, opt in self.optimizers.items():
if groups is None or k in groups:
opt.step()
return loss
def zero_grad(self):
for opt in self.optimizers.values():
opt.zero_grad()
class CompositeLRScheduler(FairseqLRScheduler):
def __init__(self, lr_schedulers):
super().__init__(None, None)
self.lr_schedulers = lr_schedulers
def state_dict(self):
"""Return the LR scheduler state dict."""
return {k: s.state_dict() for k, s in self.lr_schedulers.items()}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
for k, state in state_dict.items():
self.lr_schedulers[k].load_state_dict(state)
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
for s in self.lr_schedulers.values():
s.step_begin_epoch(epoch)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
for s in self.lr_schedulers.values():
s.step(epoch)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return {k: s.step_update(num_updates) for k, s in self.lr_schedulers.items()}
| 6,757 | 34.382199 | 118 | py |
null | DA-Transformer-main/fairseq/optim/cpu_adam.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from omegaconf import II, DictConfig
try:
import deepspeed
has_deepspeed = True
except ImportError as e:
has_deepspeed = False
def _get_cpu_adam():
try:
from deepspeed.ops.op_builder import CPUAdamBuilder
return CPUAdamBuilder().load()
except ImportError:
# fbcode
from deepspeed.ops.adam import DeepSpeedCPUAdam as ds_opt_adam
return ds_opt_adam
@dataclass
class FairseqCPUAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
fp16_adam_stats: bool = field(
default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
)
# TODO common vars below in parent
lr: List[float] = II("optimization.lr")
@register_optimizer("cpu_adam", dataclass=FairseqCPUAdamConfig)
class FairseqCPUAdam(FairseqOptimizer):
"""Adam optimizer for fairseq, optimized for CPU tensors.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
self._optimizer = CPUAdam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
"use_fp16_stats": self.cfg.fp16_adam_stats,
}
class CPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
use_fp16_stats=False,
):
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
}
super().__init__(params, defaults)
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
if not has_deepspeed:
raise ImportError("Please install DeepSpeed: pip install deepspeed")
self.opt_id = CPUAdam.optimizer_id
CPUAdam.optimizer_id = CPUAdam.optimizer_id + 1
self.ds_opt_adam = _get_cpu_adam()
adamw_mode = True
self.ds_opt_adam.create_adam(
self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, False
)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
torch.cuda.synchronize()
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state["step"] = 0
dtype = torch.float16 if self.use_fp16_stats else p.data.dtype
# gradient momentums
state["exp_avg"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
# gradient variances
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
if self.use_fp16_stats:
assert torch.is_floating_point(p.data)
state["exp_avg_scale"] = 1.0
state["exp_avg_sq_scale"] = 1.0
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
p_data_bak = p.data # backup of the original data pointer
p.data = p.data.to(dtype=torch.float32, device="cpu")
p.grad.data = p.grad.data.to(dtype=torch.float32, device="cpu")
if self.use_fp16_stats:
exp_avg = exp_avg.float() * state["exp_avg_scale"]
exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"]
state["step"] += 1
beta1, beta2 = group["betas"]
self.ds_opt_adam.adam_update(
self.opt_id,
state["step"],
group["lr"],
beta1,
beta2,
group["eps"],
group["weight_decay"],
group["bias_correction"],
p.data,
p.grad.data,
exp_avg,
exp_avg_sq,
)
if p_data_bak.data_ptr() != p.data.data_ptr():
p_data_bak.copy_(p.data)
p.data = p_data_bak
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float("inf"))
# from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py
state["exp_avg_scale"], state["exp_avg_sq_scale"] = (
1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX,
1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX,
)
state["exp_avg"], state["exp_avg_sq"] = (
(exp_avg / state["exp_avg_scale"]).half(),
(exp_avg_sq / state["exp_avg_sq_scale"]).half(),
)
return loss
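# Editor-added note (not part of the original file): with use_fp16_stats=True the Adam moments
# are kept in fp16 plus a per-tensor scale so the largest entry fits fp16 range, mirroring the
# code above:
#
#     scale = 1e-8 + exp_avg.abs().max() / 65504.0   # 65504.0 == FLOAT16_MAX
#     stored = (exp_avg / scale).half()              # dequantized later as stored.float() * scale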
| 6,802 | 31.241706 | 86 | py |
null | DA-Transformer-main/fairseq/optim/dynamic_loss_scaler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class DynamicLossScaler(object):
def __init__(
self,
init_scale=2.0**15,
scale_factor=2.0,
scale_window=2000,
tolerance=0.0,
threshold=None,
min_loss_scale=1e-4,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return self.loss_scale * outputs
def update(self):
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
            # overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
self.loss_scale = prev_scale
raise FloatingPointError(
(
"Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try lowering the learning rate, using gradient clipping or "
"increasing the batch size."
).format(self.min_loss_scale)
)
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
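# Editor-added illustrative sketch (not part of the original file): the scaler is pure Python,
# so its overflow/backoff behaviour can be exercised directly; "_demo_*" names are hypothetical.
if __name__ == "__main__":
    _demo_scaler = DynamicLossScaler(init_scale=4.0, scale_factor=2.0, scale_window=2)
    print(_demo_scaler.scale(1.0))  # 4.0: losses are multiplied by the current scale
    try:
        _demo_scaler.check_overflow(float("inf"))  # inf/nan grad norm halves the scale
    except OverflowError as _demo_err:
        print(_demo_err)  # "setting loss scale to: 2.0"
    _demo_scaler.update()  # called every step; grows the scale after scale_window clean steps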
| 2,635 | 36.126761 | 92 | py |
null | DA-Transformer-main/fairseq/optim/fairseq_optimizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.dataclass.utils import gen_parser_from_dataclass
class FairseqOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
def step(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.step(closure, scale=scale, groups=groups)
else:
self.optimizer.step(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.step(closure, groups=groups)
else:
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def average_params(self):
pass
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
class LegacyFairseqOptimizer(FairseqOptimizer):
def __init__(self, args):
self.args = args
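# Editor-added sketch (not part of the original file): a minimal concrete subclass only has to
# build a torch optimizer and expose its construction kwargs; the names below ("my_sgd", MySGD,
# the momentum value) are hypothetical, and register_optimizer is imported from fairseq.optim.
#
#     @register_optimizer("my_sgd")
#     class MySGD(FairseqOptimizer):
#         def __init__(self, cfg, params):
#             super().__init__(cfg)
#             self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
#
#         @property
#         def optimizer_config(self):
#             return {"lr": self.cfg.lr[0], "momentum": 0.9}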
| 6,176 | 33.316667 | 87 | py |
null | DA-Transformer-main/fairseq/optim/fp16_optimizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from itertools import chain
import torch
from omegaconf import DictConfig
from fairseq import optim
from .dynamic_loss_scaler import DynamicLossScaler
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in mro(method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params) or (
isinstance(self.fp32_params, dict)
and all(torch.is_tensor(t) for t in self.fp32_params.values())
)
@classmethod
def build_fp32_params(cls, args, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
is_pipeline_parallel = getattr(
args, "pipeline_model_parallel", False
) and getattr(args, "distributed_no_spawn", False)
total_param_size = sum(p.data.numel() for p in params)
devices = [torch.cuda.current_device()]
if is_pipeline_parallel:
devices = list(set(args.pipeline_devices))
fp32_params = {}
for device in devices:
if is_pipeline_parallel:
device_param_size = sum(
p.data.numel() for p in params if p.device.index == device
)
device_params = [p for p in params if p.device.index == device]
else:
device_param_size = total_param_size
device_params = params
fp32_params[device] = (
device_params[0].new(0).float().new(device_param_size)
)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(
device_param_size
)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
if hasattr(p, "expert"):
p32.expert = True
elif hasattr(p, "base_expert"):
p32.base_expert = True
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, "param_group"):
p32.param_group = p.param_group
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
# copy FP16 grads to FP32
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (
p.grad.data
if p.grad is not None
else p.data.new_zeros(p.data.shape)
)
numel = grad_data.numel()
self.fp32_params[device].grad.data[
offset : offset + numel
].copy_(grad_data.view(-1))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
if p32.grad is None:
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(
self.fp32_params[device]
.data[offset : offset + numel]
.view_as(p.data)
)
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
if grad_norm > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm
self.scaler.check_overflow(grad_norm)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
if getattr(self, "supports_step_with_scale", False):
self.fp32_optimizer.step(
closure, scale=(1.0 / self._multiply_factor), groups=groups
)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
super().__init__(cfg.optimizer)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(cfg.common, "fp16_scale_window", None) is None:
if len(cfg.optimization.update_freq) > 1:
raise ValueError(
"--fp16-scale-window must be given explicitly when using a "
"custom --update-freq schedule"
)
data_parallel_size = int(
cfg.distributed_training.distributed_world_size
/ cfg.common.model_parallel_size
)
scale_window = int(
2**14 / data_parallel_size / cfg.optimization.update_freq[0]
)
else:
scale_window = cfg.common.fp16_scale_window
if not getattr(cfg.common, "bf16", False):
self.scaler = DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
cfg (omegaconf.DictConfig): fairseq args
params (iterable): iterable of parameters to optimize
"""
flatten = not getattr(cfg.common, "fp16_no_flatten_grads", False)
if getattr(cfg.common, "bf16", False):
flatten = False # mixed precision is faster on TPUs without flat grads
fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
if flatten:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
else:
fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
f"chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads"
)
return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
class _MemoryEfficientFP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in MRO (method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return False
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
# Hack: PyTorch automatically casts the optimizer state to match the
# type of the current parameters. But with --memory-efficient-fp16 the
# params are FP16 while the optimizer state is FP32 and we don't want
# to cast. A workaround is to manually copy back the original state
# after the optimizer has been loaded.
if not getattr(self.optimizer, "disable_mem_eff_fp16_loading_hack", False):
groups = self.optimizer.param_groups
saved_groups = state_dict["param_groups"]
id_map = {
old_id: p
for old_id, p in zip(
chain(*(g["params"] for g in saved_groups)),
chain(*(g["params"] for g in groups)),
)
}
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
def _unscale_grads(self):
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.wrapped_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
max_norm = float(max_norm)
grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
grad_norm_cpu = float(grad_norm)
if grad_norm_cpu > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm_cpu
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm_cpu)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
if getattr(self, "supports_step_with_scale", False):
# NOTE(msb) optimizer divides by scale factor
self.wrapped_optimizer.step(
closure, scale=(1.0 / self._multiply_factor), groups=groups
)
else:
self._unscale_grads()
self.wrapped_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
else:
self._multiply_factor = 1.0
@property
def supports_flat_params(self):
return self.wrapped_optimizer.supports_flat_params
class MemoryEfficientFP16Optimizer(
_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer
):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(
self, cfg: DictConfig, params, optimizer, allow_unsupported=False, **kwargs
):
if not allow_unsupported and not optimizer.supports_memory_efficient_fp16:
raise ValueError(
"Unsupported optimizer: {}".format(optimizer.__class__.__name__)
)
super().__init__(getattr(cfg, "optimizer", None))
self.wrapped_optimizer = optimizer
if getattr(cfg.common, "fp16_scale_window", None) is None:
if len(cfg.optimization.update_freq) > 1:
raise ValueError(
"--fp16-scale-window must be given explicitly when using a "
"custom --update-freq schedule"
)
data_parallel_size = int(
cfg.distributed_training.distributed_world_size
/ cfg.common.model_parallel_size
)
scale_window = int(
2**14 / data_parallel_size / cfg.optimization.update_freq[0]
)
else:
scale_window = cfg.common.fp16_scale_window
if not getattr(cfg.common, "bf16", False):
self.scaler = DynamicLossScaler(
init_scale=cfg.common.fp16_init_scale,
scale_window=scale_window,
tolerance=cfg.common.fp16_scale_tolerance,
threshold=cfg.common.threshold_loss_scale,
min_loss_scale=cfg.common.min_loss_scale,
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(cfg.optimizer, params)
return cls(cfg, params, fp16_optimizer, **kwargs)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.wrapped_optimizer.optimizer = optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
@property
def lr_scheduler(self):
return getattr(self.wrapped_optimizer, "lr_scheduler", None)
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.wrapped_optimizer.all_reduce_grads(module)
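# Editor-added sketch (not part of the original file): the fairseq trainer picks between the two
# wrappers roughly as follows (simplified; the real flag handling is more involved):
#
#     if cfg.common.memory_efficient_fp16:
#         optimizer = MemoryEfficientFP16Optimizer.build_optimizer(cfg, params)  # no FP32 master copy
#     else:
#         optimizer = FP16Optimizer.build_optimizer(cfg, params)                 # flat FP32 master params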
| 21,467 | 37.750903 | 136 | py |
null | DA-Transformer-main/fairseq/optim/fused_adam.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import types
import torch
def get_fused_adam_class():
"""
Look for the FusedAdam optimizer from apex. We first try to load the
"contrib" interface, which is a bit faster than the main interface,
but is technically deprecated.
"""
try:
# The "deprecated" interface in recent versions of apex is a bit
# faster than the main interface, since we don't use the apex
# optimizer. This can be installed by passing the
# `--deprecated_fused_adam` option when building apex.
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
return FusedAdamV1
except ImportError:
try:
# fallback to the newer interface
from apex.multi_tensor_apply import multi_tensor_applier
from apex.optimizers import FusedAdam as _FusedAdam # noqa
if multi_tensor_applier.available:
return FusedAdamV2
except ImportError:
pass
return None
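# Editor-added sketch (not part of the original file): callers typically fall back to a plain
# PyTorch AdamW when apex is unavailable, e.g.:
#
#     fused_adam_cls = get_fused_adam_class()
#     if fused_adam_cls is not None and torch.cuda.is_available():
#         optimizer = fused_adam_cls(params, lr=1e-3, betas=(0.9, 0.999))
#     else:
#         optimizer = torch.optim.AdamW(params, lr=1e-3, betas=(0.9, 0.999))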
class FusedAdamV1(torch.optim.Optimizer):
"""
Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
    .. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.0,
max_grad_norm=0.0,
amsgrad=False,
use_fp16_stats=False,
):
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"max_grad_norm": max_grad_norm,
}
super().__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@property
def supports_step_with_scale(self):
return True
def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
for group, grads_this_group, grad_norm in zip(
self.param_groups, grads_group, grad_norms
):
if grads_this_group is None:
grads_this_group = [None] * len(group["params"])
# compute combined scale factor for this group
combined_scale = scale
if group.get("max_grad_norm", 0) > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"]
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group.get("bias_correction", 1) else 0
for p, grad in zip(group["params"], grads_this_group):
# note: p.grad should not ever be set for correct
# operation of mixed precision optimizer that sometimes
# sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
if p.device.type == "cpu":
p_data_fp32 = p.data.cuda(non_blocking=True).float()
out_p = torch.tensor([], dtype=torch.float)
else:
p_data_fp32 = p.data.float()
out_p = p.data
state = self.state[p]
# State initialization
dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype)
if self.use_fp16_stats:
state["exp_avg_scale"] = 1.0
state["exp_avg_sq_scale"] = 1.0
else:
device = p_data_fp32.device
state["exp_avg"] = state["exp_avg"].to(device, dtype)
state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
if self.use_fp16_stats:
assert exp_avg.dtype == torch.float16
exp_avg = exp_avg.float() * state["exp_avg_scale"]
exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"]
beta1, beta2 = group["betas"]
state["step"] += 1
with torch.cuda.device(p_data_fp32.device):
fused_adam_cuda.adam(
p_data_fp32,
out_p,
exp_avg,
exp_avg_sq,
grad,
group["lr"],
beta1,
beta2,
group["eps"],
combined_scale,
state["step"],
self.eps_mode,
bias_correction,
group["weight_decay"],
)
if p.device.type == "cpu":
p.data.copy_(p_data_fp32, non_blocking=True)
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float("inf"))
# from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py
state["exp_avg_scale"], state["exp_avg_sq_scale"] = (
1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX,
1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX,
)
state["exp_avg"], state["exp_avg_sq"] = (
(exp_avg / state["exp_avg_scale"]).half(),
(exp_avg_sq / state["exp_avg_sq_scale"]).half(),
)
return loss
try:
from apex.multi_tensor_apply import multi_tensor_applier
from apex.optimizers import FusedAdam
class FusedAdamV2(FusedAdam):
"""
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
"""
def __init__(self, *args, use_fp16_stats=False, **kwargs):
if use_fp16_stats:
raise NotImplementedError(
"--fp16-adam-stats is only supported with FusedAdamV1"
)
super().__init__(*args, **kwargs)
if not hasattr(self, "multi_tensor_adam"):
raise Exception(
"Apex installation is outdated. Please install an updated version of apex."
)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(
self,
closure=None,
grads=None,
output_params=None,
scale=None,
grad_norms=None,
):
"""Performs a single optimization step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group["bias_correction"] else 0
beta1, beta2 = group["betas"]
                # assume the same step across the group for now to simplify things
                # per-parameter steps could be supported by making it a tensor or passing a list into the kernel
if "step" in group:
group["step"] += 1
else:
group["step"] = 1
# create lists for multi-tensor apply
g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group["params"]:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=torch.float
)
else:
state["exp_avg"] = state["exp_avg"].to(
device=p.data.device, dtype=torch.float
)
state["exp_avg_sq"] = state["exp_avg_sq"].to(
device=p.data.device, dtype=torch.float
)
if p.dtype == torch.float16:
g_16.append(p.grad.data.float())
p_16.append(p.data.float())
orig_p_16.append(p.data)
m_16.append(state["exp_avg"])
v_16.append(state["exp_avg_sq"])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state["exp_avg"])
v_32.append(state["exp_avg_sq"])
else:
raise RuntimeError("FusedAdam only support fp16 and fp32.")
with torch.cuda.device(p.device):
if len(g_16) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
for orig_p, p in zip(orig_p_16, p_16):
orig_p.copy_(p.data)
if len(g_32) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
return loss
except ImportError:
pass
| 15,188 | 38.248062 | 104 | py |
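The ``step`` method above folds the fp16 loss scale and ``max_grad_norm`` clipping into a single divisor that is handed to the fused CUDA kernel. A minimal standalone sketch of that computation (the numeric values below are illustrative, not fairseq output):

def combined_scale(loss_scale, grad_norm, max_grad_norm):
    # grad_norm is the norm of the *scaled* gradients, as noted in the code above
    scale = loss_scale
    if max_grad_norm > 0 and grad_norm is not None:
        clip = ((grad_norm / loss_scale) + 1e-6) / max_grad_norm
        if clip > 1:
            scale = clip * loss_scale
    return scale

# loss scale 128, scaled-gradient norm 640, clip threshold 1.0:
# the unscaled norm is 5.0, so gradients end up divided by ~640 instead of 128.
print(combined_scale(128.0, 640.0, 1.0))  # ~640.0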
null | DA-Transformer-main/fairseq/optim/fused_lamb.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("lamb")
class FairseqLAMB(LegacyFairseqOptimizer):
"""LAMB optimizer."""
def __init__(self, args, params):
super().__init__(args)
try:
from apex.optimizers import FusedLAMB
self._optimizer = FusedLAMB(params, **self.optimizer_config)
except ImportError:
raise ImportError("Please install apex to use LAMB optimizer")
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B',
help='betas for LAMB optimizer')
parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D',
help='epsilon for LAMB optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"betas": eval(self.args.lamb_betas),
"eps": self.args.lamb_eps,
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return False
| 1,834 | 34.288462 | 92 | py |
null | DA-Transformer-main/fairseq/optim/nag.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
from fairseq.dataclass import FairseqDataclass
from omegaconf import II, DictConfig
from torch.optim.optimizer import Optimizer, required
from . import FairseqOptimizer, register_optimizer
@dataclass
class FairseqNAGConfig(FairseqDataclass):
momentum: float = field(default=0.99, metadata={"help": "momentum factor"})
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
# TODO common vars in parent class
lr: List[float] = II("optimization.lr")
@register_optimizer("nag", dataclass=FairseqNAGConfig)
class FairseqNAG(FairseqOptimizer):
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
self._optimizer = NAG(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"momentum": self.cfg.momentum,
"weight_decay": self.cfg.weight_decay,
}
class NAG(Optimizer):
def __init__(self, params, lr=required, momentum=0, weight_decay=0):
defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
super(NAG, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
lr = group["lr"]
lr_old = group.get("lr_old", lr)
lr_correct = lr / lr_old if lr_old > 0 else lr
for p in group["params"]:
if p.grad is None:
continue
p_data_fp32 = p.data
if p_data_fp32.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
d_p = p.grad.data.float()
param_state = self.state[p]
if "momentum_buffer" not in param_state:
param_state["momentum_buffer"] = torch.zeros_like(d_p)
else:
param_state["momentum_buffer"] = param_state["momentum_buffer"].to(
d_p
)
buf = param_state["momentum_buffer"]
if weight_decay != 0:
p_data_fp32.mul_(1 - lr * weight_decay)
p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct)
p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr)
buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
group["lr_old"] = lr
return loss
| 3,731 | 32.321429 | 87 | py |
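The three in-place tensor operations in ``NAG.step`` correspond to the scalar update below; this is a minimal sketch for illustration only (weight decay omitted, plain floats instead of tensors):

def nag_update(p, buf, grad, lr, momentum, lr_correct=1.0):
    p = p + (momentum * momentum * lr_correct) * buf   # momentum look-ahead
    p = p - (1 + momentum) * lr * grad                 # corrected gradient step
    buf = momentum * lr_correct * buf - lr * grad      # refresh momentum buffer
    return p, buf

p, buf = 1.0, 0.0
for _ in range(3):
    p, buf = nag_update(p, buf, grad=0.5, lr=0.1, momentum=0.99)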
null | DA-Transformer-main/fairseq/optim/sgd.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("sgd")
class SGD(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
help='momentum factor')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"momentum": self.args.momentum,
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return True
| 1,442 | 31.795455 | 92 | py |
null | DA-Transformer-main/fairseq/optim/shard.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from fairseq.distributed import utils
try:
from fairscale.optim import OSS
_has_fairscale = True
except ImportError:
_has_fairscale = False
def shard_(optimizer, group):
if not _has_fairscale:
raise ImportError(
"\n\nPlease install the fairscale package:" "\n\n pip install fairscale"
)
class FairseqOSS(OSS):
@property
def disable_mem_eff_fp16_loading_hack(self):
return True
def __getattr__(self, name):
if name.startswith("supports") and hasattr(self.optim, name):
return getattr(self.optim, name)
raise AttributeError(
"'FairseqOSS' object has no attribute {0!r}".format(name)
)
def broadcast_global_state_dict(
self, state_dict: Dict[str, Any]
) -> Dict[str, Any]:
"""
            Broadcasts the entire state_dict to all other ranks;
            each rank is responsible for loading its own partition of the data.
"""
return utils.broadcast_object(
state_dict,
src_rank=0,
group=self.group,
)
torch_optimizer = optimizer.optimizer
optim_cls = type(torch_optimizer)
optimizer.optimizer = FairseqOSS(
torch_optimizer.param_groups,
optim_cls,
group=group,
**optimizer.optimizer_config
)
| 1,624 | 26.542373 | 85 | py |
null | DA-Transformer-main/fairseq/optim/lr_scheduler/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa
FairseqLRScheduler,
LegacyFairseqLRScheduler,
)
from omegaconf import DictConfig
(
build_lr_scheduler_,
register_lr_scheduler,
LR_SCHEDULER_REGISTRY,
LR_SCHEDULER_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--lr-scheduler", base_class=FairseqLRScheduler, default="fixed"
)
def build_lr_scheduler(cfg: DictConfig, optimizer):
return build_lr_scheduler_(cfg, optimizer)
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.optim.lr_scheduler." + file_name)
| 1,053 | 27.486486 | 76 | py |
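A hypothetical example of plugging a new scheduler into this registry; the name, config and class below are made up for illustration and are not part of fairseq:

from dataclasses import dataclass
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler

@dataclass
class ConstantLRScheduleConfig(FairseqDataclass):
    lr: List[float] = II("optimization.lr")

@register_lr_scheduler("my_constant", dataclass=ConstantLRScheduleConfig)
class ConstantLRSchedule(FairseqLRScheduler):
    """Keep the learning rate fixed at cfg.lr[0] (illustrative only)."""

    def __init__(self, cfg, optimizer):
        super().__init__(cfg, optimizer)
        self.lr = cfg.lr[0]
        self.optimizer.set_lr(self.lr)

    def step_update(self, num_updates):
        # the trainer calls this after every update
        self.optimizer.set_lr(self.lr)
        return self.lr

Once imported, it would be selected with ``--lr-scheduler my_constant``.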
null | DA-Transformer-main/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class CosineLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = field(
default=II("optimization.lr"),
metadata={"help": "max learning rate, must be more than cfg.min_lr"},
)
min_lr: float = field(default=0.0, metadata={"help": "min learning rate"})
t_mult: float = field(
default=1.0, metadata={"help": "factor to grow the length of each period"}
)
lr_period_updates: float = field(
default=-1, metadata={"help": "initial number of updates per period"}
)
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
# This is not required, but is for convenience in inferring lr_period_updates
max_update: int = II("optimization.max_update")
@register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig)
class CosineLRSchedule(FairseqLRScheduler):
"""Assign LR based on a cyclical schedule that follows the cosine function.
See https://arxiv.org/pdf/1608.03983.pdf for details.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
max learning rate (``--lr``).
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
      lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(pi * t_curr / t_i))
    where ``t_curr`` is the number of updates taken so far within the current
    period and ``t_i`` is the length of the current period, which is scaled by
    ``t_mult`` after every restart.
"""
def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer):
super().__init__(cfg, fairseq_optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with cosine."
f" Consider --lr-scheduler=fixed instead. ({cfg.lr})"
)
self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
assert (
self.max_lr > cfg.min_lr
), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})"
warmup_end_lr = self.max_lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = cfg.min_lr
self.t_mult = cfg.t_mult
self.period = cfg.lr_period_updates
if self.period <= 0:
assert (
cfg.max_update > 0
), "Either --max_update or --lr-period-updates must be set"
self.period = cfg.max_update - cfg.warmup_updates
if cfg.warmup_updates > 0:
# linearly warmup for the first cfg.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
else:
self.lr_step = 1
self.warmup_updates = cfg.warmup_updates
self.lr_shrink = cfg.lr_shrink
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
else:
curr_updates = num_updates - self.cfg.warmup_updates
if self.t_mult != 1:
i = math.floor(
math.log(
1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult
)
)
t_i = self.t_mult**i * self.period
t_curr = (
curr_updates
- (1 - self.t_mult**i) / (1 - self.t_mult) * self.period
)
else:
i = math.floor(curr_updates / self.period)
t_i = self.period
t_curr = curr_updates - (self.period * i)
lr_shrink = self.lr_shrink**i
min_lr = self.cfg.min_lr * lr_shrink
max_lr = self.max_lr * lr_shrink
self.lr = min_lr + 0.5 * (max_lr - min_lr) * (
1 + math.cos(math.pi * t_curr / t_i)
)
self.optimizer.set_lr(self.lr)
return self.lr
| 5,301 | 34.824324 | 87 | py |
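A self-contained sketch of the schedule above for the common case ``t_mult=1`` with no shrinking between periods (``lr_shrink=1``); the hyper-parameter values are illustrative only:

import math

def cosine_lr(step, *, max_lr=5e-4, min_lr=1e-5, warmup=4000,
              period=46000, warmup_init=1e-7):
    if step < warmup:
        return warmup_init + step * (max_lr - warmup_init) / warmup
    t_curr = (step - warmup) % period  # position inside the current period
    return min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / period))

for s in (0, 2000, 4000, 27000, 50000):
    print(s, f"{cosine_lr(s):.2e}")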
null | DA-Transformer-main/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim import FairseqOptimizer
class FairseqLRScheduler(object):
def __init__(self, cfg, optimizer):
super().__init__()
if optimizer is not None and not isinstance(optimizer, FairseqOptimizer):
raise ValueError("optimizer must be an instance of FairseqOptimizer")
self.cfg = cfg
self.optimizer = optimizer
self.best = None
@classmethod
def add_args(cls, parser):
"""Add arguments to the parser for this LR scheduler."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
def state_dict(self):
"""Return the LR scheduler state dict."""
return {"best": self.best}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.best = state_dict["best"]
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
pass
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
if self.best is None:
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.get_lr()
class LegacyFairseqLRScheduler(FairseqLRScheduler):
def __init__(self, args: Namespace, optimizer):
if not isinstance(optimizer, FairseqOptimizer):
raise ValueError("optimizer must be an instance of FairseqOptimizer")
self.args = args
self.optimizer = optimizer
self.best = None
| 2,031 | 32.866667 | 81 | py |
null | DA-Transformer-main/fairseq/optim/lr_scheduler/fixed_schedule.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional, List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class FixedLRScheduleConfig(FairseqDataclass):
force_anneal: Optional[int] = field(
default=None,
metadata={"help": "force annealing at specified epoch"},
)
lr_shrink: float = field(
default=0.1,
metadata={"help": "shrink factor for annealing, lr_new = (lr * lr_shrink)"},
)
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("fixed", dataclass=FixedLRScheduleConfig)
class FixedLRSchedule(FairseqLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, cfg: FixedLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
self.lr = cfg.lr[0]
if cfg.warmup_updates > 0:
self.warmup_factor = 1.0 / cfg.warmup_updates
else:
self.warmup_factor = 1
def state_dict(self):
return {"lr": self.lr}
def load_state_dict(self, state_dict):
if "lr" in state_dict:
self.lr = state_dict["lr"]
def get_next_lr(self, epoch):
lrs = self.cfg.lr
if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch - 1, len(lrs) - 1)]
else:
            # anneal based on lr_shrink
next_lr = lrs[-1] * self.cfg.lr_shrink ** (
epoch + 1 - self.cfg.force_anneal
)
return next_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.cfg.warmup_updates > 0 and num_updates < self.cfg.warmup_updates:
self.warmup_factor = (num_updates + 1) / float(self.cfg.warmup_updates)
self.optimizer.set_lr(self.warmup_factor * self.lr)
else:
self.optimizer.set_lr(self.lr)
return self.optimizer.get_lr()
| 2,643 | 33.337662 | 87 | py |
null | DA-Transformer-main/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class InverseSquareRootLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=4000,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig)
class InverseSquareRootSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
    learning rate (``--lr``). Thereafter the LR decays proportionally to the inverse
    square root of the update number, with the decay factor chosen so the decayed
    value equals the configured learning rate at the end of warmup.
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = cfg.lr * sqrt(cfg.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with inverse_sqrt."
" Consider --lr-scheduler=fixed instead."
)
warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first cfg.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * cfg.warmup_updates**0.5
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
| 3,228 | 36.546512 | 87 | py |
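The same schedule written as a stand-alone function with illustrative defaults; note that at ``step == warmup`` the decayed value equals the peak learning rate exactly:

def inverse_sqrt_lr(step, *, lr=5e-4, warmup=4000, warmup_init=0.0):
    if step < warmup:
        return warmup_init + step * (lr - warmup_init) / warmup
    decay_factor = lr * warmup ** 0.5
    return decay_factor * step ** -0.5

print(inverse_sqrt_lr(4000))   # 5e-4, the peak
print(inverse_sqrt_lr(16000))  # 2.5e-4, halved after 4x as many updates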
null | DA-Transformer-main/fairseq/optim/lr_scheduler/manual_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import LegacyFairseqLRScheduler, register_lr_scheduler
import logging
import ast
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
@register_lr_scheduler("manual")
class ManualSchedule(LegacyFairseqLRScheduler):
"""Decay the LR on a manual schedule."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
self.epoch2lr = self.parse_manuallr_args(args.epoch2lr)
self.update2lr = self.parse_manuallr_args(args.update2lr)
logger.info("@@@ ManualSchedule epoch2lr={}".format(self.epoch2lr))
logger.info("@@@ ManualSchedule update2lr={}".format(self.update2lr))
if 1 in self.epoch2lr:
self.lr = self.epoch2lr[1]
elif 1 in self.update2lr:
self.lr = self.update2lr[1]
else:
self.lr = args.lr[0]
self.optimizer.set_lr(self.lr) # Set the beginning of the epoch.
def parse_manuallr_args(self, lr_args_str):
lr_dict = ast.literal_eval(lr_args_str.replace(" ", ""))
if not isinstance(lr_dict, dict):
raise ValueError("epoch2lr/update2lr must be abel to evaluated to a dict")
lr_args = {}
logger.info("@@@ after parsing input dictionary lr_dict = {}".format(lr_dict))
for key, val in lr_dict.items():
if "," in key:
for k in key.split(","):
lr_args[int(k)] = float(val)
elif "-" in key:
s = int(key.split("-")[0])
e = int(key.split("-")[1])
for k in range(s, e + 1, 1):
lr_args[k] = float(val)
else:
lr_args[int(key)] = float(val)
return lr_args
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
# fmt: off
parser.add_argument(
"--epoch2lr",
type=str,
metavar="DICT",
default="{}",
help="a dictionary used to set lr for each epoch manually",
)
parser.add_argument(
"--update2lr",
type=str,
metavar="DICT",
default="{}",
help="a dictionary used to set lr for each update manually",
)
# fmt: on
def state_dict(self):
return {"lr": self.lr}
def load_state_dict(self, state_dict):
if "lr" in state_dict:
self.lr = state_dict["lr"]
def get_next_lr(self, epoch):
manual_keys = [k for k in self.epoch2lr if k <= epoch]
if manual_keys:
manual_lr = self.epoch2lr[max(manual_keys)]
else:
logger.warning(
"@@@ epoch={} does not exist in manual lr input. epoch2lr={}...".format(
epoch,
list(self.epoch2lr.items())[
: min(10, len(self.epoch2lr.keys()) - 1)
],
)
)
manual_lr = self.optimizer.get_lr()
return manual_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
manual_keys = [k for k in self.update2lr if k <= num_updates]
if manual_keys:
manual_lr = self.update2lr[max(manual_keys)]
else:
logger.warning(
"epoch={} does not exist in manual lr input update2lr={}...".format(
num_updates,
list(self.update2lr.items())[
: min(10, len(self.update2lr.keys()) - 1)
],
)
)
manual_lr = self.optimizer.get_lr()
self.optimizer.set_lr(manual_lr)
return self.optimizer.get_lr()
| 4,174 | 33.221311 | 88 | py |
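The dictionary strings consumed by this scheduler accept single keys, comma-separated lists and inclusive ranges; a hypothetical invocation and the dictionary it expands to (values made up):

# --lr-scheduler manual --epoch2lr "{'1-4': 5e-4, '5,6': 2e-4, '7': 1e-4}"
import ast
spec = ast.literal_eval("{'1-4': 5e-4, '5,6': 2e-4, '7': 1e-4}".replace(" ", ""))
# parse_manuallr_args expands this to:
# {1: 5e-4, 2: 5e-4, 3: 5e-4, 4: 5e-4, 5: 2e-4, 6: 2e-4, 7: 1e-4}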
null | DA-Transformer-main/fairseq/optim/lr_scheduler/pass_through.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class PassThroughScheduleConfig(FairseqDataclass):
pass
@register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig)
class PassThroughScheduleSchedule(FairseqLRScheduler):
"""Delegate lr scheduling to the optimizer."""
def __init__(self, cfg: PassThroughScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
assert (
hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None
), "Pass-through schedule can only be used with optimizers with their own schedulers"
def state_dict(self):
return self.optimizer.lr_scheduler.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.lr_scheduler.load_state_dict(state_dict)
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
return self.optimizer.lr_scheduler.step_begin_epoch(epoch)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.lr_scheduler.step_update(num_updates)
| 1,445 | 35.15 | 93 | py |
null | DA-Transformer-main/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional, List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class PolynomialDecayLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
force_anneal: Optional[int] = field(
default=None,
metadata={"help": "force annealing at specified epoch"},
)
end_learning_rate: float = field(
default=0.0,
metadata={"help": "learning rate to decay to"},
)
power: float = field(
default=1.0,
metadata={"help": "decay exponent"},
)
total_num_update: float = field(
default=II("optimization.max_update"),
metadata={"help": "total number of updates over which to decay learning rate"},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig)
class PolynomialDecayLRSchedule(FairseqLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
assert cfg.total_num_update > 0
self.lr = cfg.lr[0]
if cfg.warmup_updates > 0:
self.warmup_factor = 1.0 / cfg.warmup_updates
else:
self.warmup_factor = 1
self.end_learning_rate = cfg.end_learning_rate
self.total_num_update = cfg.total_num_update
self.power = cfg.power
self.optimizer.set_lr(self.warmup_factor * self.lr)
def get_next_lr(self, epoch):
lrs = self.cfg.lr
if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
            # past force_anneal: keep the optimizer's current learning rate
next_lr = self.optimizer.get_lr()
return next_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.cfg.warmup_updates > 0 and num_updates <= self.cfg.warmup_updates:
self.warmup_factor = num_updates / float(self.cfg.warmup_updates)
lr = self.warmup_factor * self.lr
elif num_updates >= self.total_num_update:
lr = self.end_learning_rate
else:
warmup = self.cfg.warmup_updates
lr_range = self.lr - self.end_learning_rate
pct_remaining = 1 - (num_updates - warmup) / (
self.total_num_update - warmup
)
lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate
self.optimizer.set_lr(lr)
return self.optimizer.get_lr()
| 3,302 | 35.7 | 87 | py |
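A stand-alone sketch of the decay above with made-up hyper-parameters (the decay is linear when ``power=1``):

def polynomial_decay_lr(step, *, lr=1e-4, end_lr=0.0, power=1.0,
                        warmup=1000, total=100000):
    if warmup > 0 and step <= warmup:
        return lr * step / warmup
    if step >= total:
        return end_lr
    pct_remaining = 1 - (step - warmup) / (total - warmup)
    return (lr - end_lr) * pct_remaining ** power + end_lr

print(polynomial_decay_lr(500))     # 5e-05, still warming up
print(polynomial_decay_lr(50500))   # 5e-05, halfway through the decay
print(polynomial_decay_lr(100000))  # 0.0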
null | DA-Transformer-main/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List
import torch.optim.lr_scheduler
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass):
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
lr_threshold: float = field(
default=1e-4,
metadata={
"help": (
"threshold for measuring the new optimum, to only focus on "
"significant changes"
)
},
)
lr_patience: int = field(
default=0,
metadata={
"help": (
"number of epochs with no improvement after which learning rate will "
"be reduced"
)
},
)
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = II("optimization.lr")
maximize_best_checkpoint_metric: bool = II(
"checkpoint.maximize_best_checkpoint_metric"
)
@register_lr_scheduler(
"reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig
)
class ReduceLROnPlateauLRSchedule(FairseqLRScheduler):
"""
Decay the LR by a factor every time the validation loss plateaus.
Also comes with optional warmup phase, where we linearly increase
the learning rate from some initial learning rate
(``--warmup-init-lr``) until the configured learning rate
    (``--lr``). Thereafter the lr is adjusted according to the original
reduce_on_plateau scheme.
During warmup::
lrs = torch.linspace(
cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates
)
lr = lrs[update_num]
"""
def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with reduce_lr_on_plateau."
" Consider --lr-scheduler=fixed instead."
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer.optimizer,
patience=cfg.lr_patience,
factor=cfg.lr_shrink,
mode="max" if cfg.maximize_best_checkpoint_metric else "min",
threshold=cfg.lr_threshold,
)
warmup_end_lr = cfg.lr[0]
# if no warm up, sets initial lr to be cfg.lr[0]
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first cfg.warmup_updates
if cfg.warmup_updates > 0:
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
# this flag is either set from arg when no warm up, or set by
# step_update() when warmup finishes
self.warmup_end = True if cfg.warmup_updates <= 0 else False
# initial learning rate
# this self.lr is used only during init and/or warm up period
self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
def state_dict(self):
"""Return the LR scheduler state dict."""
return {
"best": self.lr_scheduler.best,
"last_epoch": self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.lr_scheduler.best = state_dict["best"]
if "last_epoch" in state_dict:
self.lr_scheduler.last_epoch = state_dict["last_epoch"]
def step(self, epoch, val_loss=None):
"""
Update the learning rate at the end of the given epoch if warmup
finishes otherwise no update of lr on epoch boundaries
"""
if val_loss is not None and self.warmup_end is True:
self.lr_scheduler.step(val_loss)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""
Update the learning rate after each update."""
# if there is warmup
if self.cfg.warmup_updates > 0:
if num_updates <= self.cfg.warmup_updates:
self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
self.optimizer.set_lr(self.lr)
else:
if self.warmup_end is False:
self.warmup_end = True
# else do nothing
return self.optimizer.get_lr()
| 5,047 | 34.055556 | 87 | py |
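The class above delegates the plateau logic to ``torch.optim.lr_scheduler.ReduceLROnPlateau``; a minimal stand-alone sketch of that underlying behaviour with illustrative values:

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode="min", factor=0.1, patience=0, threshold=1e-4
)
for val_loss in [1.0, 0.9, 0.95]:   # the last epoch shows no improvement
    sched.step(val_loss)
print(opt.param_groups[0]["lr"])    # reduced once: 0.1 -> 0.01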
null | DA-Transformer-main/fairseq/optim/lr_scheduler/step_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class StepLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = field(
default=II("optimization.lr"),
metadata={"help": "max learning rate, must be more than cfg.min_lr"},
)
min_lr: float = field(default=0.0, metadata={"help": "min learning rate"})
lr_deacy_period: int = field(default=25000, metadata={"help": "decay period"})
lr_decay: float = field(default=0.5, metadata={"help": "decay factor"})
@register_lr_scheduler("step", dataclass=StepLRScheduleConfig)
class StepLRSchedule(FairseqLRScheduler):
"""Decay learning rate every k updates by a fixed factor"""
def __init__(self, cfg: StepLRScheduleConfig, fairseq_optimizer):
super().__init__(cfg, fairseq_optimizer)
self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
self.min_lr = cfg.min_lr
self.lr_deacy_period = cfg.lr_deacy_period
self.lr_decay = cfg.lr_decay
self.warmup_updates = cfg.warmup_updates
self.warmup_init_lr = (
cfg.warmup_init_lr if cfg.warmup_init_lr >= 0 else self.min_lr
)
assert self.lr_deacy_period > 0
assert self.lr_decay <= 1
assert self.min_lr >= 0
assert self.max_lr > self.min_lr
if cfg.warmup_updates > 0:
# linearly warmup for the first cfg.warmup_updates
self.warmup_lr_step = (
self.max_lr - self.warmup_init_lr
) / self.warmup_updates
else:
self.warmup_lr_step = 1
# initial learning rate
self.lr = self.warmup_init_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.cfg.warmup_updates:
self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step
else:
curr_updates = num_updates - self.cfg.warmup_updates
lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period)
self.lr = max(self.max_lr * lr_mult, self.min_lr)
self.optimizer.set_lr(self.lr)
return self.lr
| 3,154 | 35.686047 | 87 | py |
null | DA-Transformer-main/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import Optional, List, Tuple
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class TriStageLRScheduleConfig(FairseqDataclass):
warmup_steps: int = field(
default=0,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
hold_steps: int = field(
default=0,
metadata={"help": "steps in hold stage"},
)
decay_steps: int = field(
default=0,
metadata={"help": "steps in decay stages"},
)
phase_ratio: Optional[Tuple[float, float, float]] = field(
default=None,
metadata={
"help": (
"if set, automatically sets warmup/hold/decay steps to the ratio "
"specified here from max_updates. the ratios must add up to 1.0"
)
},
)
init_lr_scale: float = field(
default=0.01,
metadata={"help": "initial learning rate scale during warmup phase"},
)
final_lr_scale: float = field(
default=0.01,
metadata={"help": "final learning rate scale"},
)
max_update: float = II("optimization.max_update")
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig)
class TriStageLRSchedule(FairseqLRScheduler):
"""Tristage learning rate schedulr
Implement the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf
    Similar to the inverse_square_root scheduler, but tri_stage employs
    three stages of LR scheduling:
- warmup stage, starting from `lr` * `init_lr_scale`, linearly
increased to `lr` in `warmup_steps` iterations
- hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`
iterations
    - decay stage, after hold stage, decay LR exponentially to
`lr` * `final_lr_scale` in `decay_steps`;
after that LR is keep as `final_lr_scale` * `lr`
During warmup::
init_lr = cfg.init_lr_scale * cfg.lr
lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps)
lr = lrs[update_num]
During hold::
lr = cfg.lr
During decay::
decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps
lr = cfg.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor)
After that::
lr = cfg.lr * cfg.final_lr_scale
"""
def __init__(self, cfg: TriStageLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with tri-stage lr."
" Consider --lr-scheduler=fixed instead."
)
# calculate LR at each point
self.peak_lr = cfg.lr[0]
self.init_lr = cfg.init_lr_scale * cfg.lr[0]
self.final_lr = cfg.final_lr_scale * cfg.lr[0]
if cfg.phase_ratio is not None:
assert cfg.max_update > 0
assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1"
self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0])
self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1])
self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2])
else:
self.warmup_steps = cfg.warmup_steps
self.hold_steps = cfg.hold_steps
self.decay_steps = cfg.decay_steps
assert (
self.warmup_steps + self.hold_steps + self.decay_steps > 0
), "please specify steps or phase_ratio"
self.warmup_rate = (
(self.peak_lr - self.init_lr) / self.warmup_steps
if self.warmup_steps != 0
else 0
)
self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps
# initial learning rate
self.lr = self.init_lr
self.optimizer.set_lr(self.lr)
def _decide_stage(self, update_step):
"""
return stage, and the corresponding steps within the current stage
"""
if update_step < self.warmup_steps:
# warmup state
return 0, update_step
offset = self.warmup_steps
if update_step < offset + self.hold_steps:
# hold stage
return 1, update_step - offset
offset += self.hold_steps
if update_step <= offset + self.decay_steps:
# decay stage
return 2, update_step - offset
offset += self.decay_steps
# still here ? constant lr stage
return 3, update_step - offset
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
stage, steps_in_stage = self._decide_stage(num_updates)
if stage == 0:
self.lr = self.init_lr + self.warmup_rate * steps_in_stage
elif stage == 1:
self.lr = self.peak_lr
elif stage == 2:
self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
elif stage == 3:
self.lr = self.final_lr
else:
raise ValueError("Undefined stage")
self.optimizer.set_lr(self.lr)
return self.lr
| 5,766 | 31.767045 | 87 | py |
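A compact sketch of the three stages with made-up step counts (warmup, hold, exponential decay, then a constant floor):

import math

def tri_stage_lr(step, *, peak=3e-4, init_scale=0.01, final_scale=0.01,
                 warmup=10000, hold=20000, decay=20000):
    init_lr, final_lr = peak * init_scale, peak * final_scale
    if step < warmup:
        return init_lr + (peak - init_lr) * step / warmup
    step -= warmup
    if step < hold:
        return peak
    step -= hold
    if step <= decay:
        decay_factor = -math.log(final_scale) / decay
        return peak * math.exp(-decay_factor * step)
    return final_lr

for s in (0, 5000, 15000, 40000, 80000):
    print(s, f"{tri_stage_lr(s):.2e}")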
null | DA-Transformer-main/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class TriangularLRScheduleConfig(FairseqDataclass):
max_lr: float = field(
default="???", metadata={"help": "max learning rate, must be more than cfg.lr"}
)
lr_period_updates: float = field(
default=5000,
metadata={"help": "initial number of updates per period (cycle length)"},
)
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
shrink_min: bool = field(
default=False, metadata={"help": "if set, also shrinks min lr"}
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig)
class TriangularLRSchedule(FairseqLRScheduler):
"""Assign LR based on a triangular cyclical schedule.
See https://arxiv.org/pdf/1506.01186.pdf for details.
"""
def __init__(self, cfg: TriangularLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with triangular."
" Consider --lr-scheduler=fixed instead."
)
lr = cfg.lr[0]
assert cfg.max_lr > lr, "max_lr must be more than lr"
self.min_lr = lr
self.max_lr = cfg.max_lr
self.stepsize = cfg.lr_period_updates // 2
self.lr_shrink = cfg.lr_shrink
self.shrink_min = cfg.shrink_min
# initial learning rate
self.lr = self.min_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
cycle = math.floor(num_updates / (2 * self.stepsize))
lr_shrink = self.lr_shrink**cycle
max_lr = self.max_lr * lr_shrink
if self.shrink_min:
min_lr = self.min_lr * lr_shrink
else:
min_lr = self.min_lr
x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))
self.optimizer.set_lr(self.lr)
return self.lr
| 2,760 | 31.869048 | 87 | py |
null | DA-Transformer-main/fairseq/scoring/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
_choice = choice._name if isinstance(choice, DictConfig) else choice
if _choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
# automatically import any Python files in the current directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| 1,399 | 24 | 87 | py |
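A hypothetical scorer registered through the same mechanism; the name and class below are examples for illustration and are not part of fairseq:

from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer

@dataclass
class ExactMatchScorerConfig(FairseqDataclass):
    pass

@register_scorer("exact_match", dataclass=ExactMatchScorerConfig)
class ExactMatchScorer(BaseScorer):
    def score(self) -> float:
        # percentage of hypotheses that match their reference exactly
        matches = sum(r == p for r, p in zip(self.ref, self.pred))
        return 100.0 * matches / max(len(self.ref), 1)

    def result_string(self) -> str:
        return f"Exact match: {self.score():.2f}"

It could then be selected with ``--scoring exact_match`` at generation time.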
null | DA-Transformer-main/fairseq/scoring/bertscore.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import numpy as np
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
@dataclass
class BertScoreScorerConfig(FairseqDataclass):
bert_score_lang: str = field(default="en", metadata={"help": "BERTScore language"})
@register_scorer("bert_score", dataclass=BertScoreScorerConfig)
class BertScoreScorer(BaseScorer):
def __init__(self, cfg):
super(BertScoreScorer, self).__init__(cfg)
try:
import bert_score as _bert_score
except ImportError:
raise ImportError("Please install BERTScore: pip install bert-score")
self.cfg = cfg
self._bert_score = _bert_score
self.scores = None
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
def score(self, order=4):
_, _, self.scores = self._bert_score.score(
self.pred, self.ref, lang=self.cfg.bert_score_lang
)
self.scores = self.scores.numpy()
return np.mean(self.scores)
def result_string(self, order=4):
return f"BERTScore: {self.score():.4f}"
| 1,349 | 29 | 87 | py |
null | DA-Transformer-main/fairseq/scoring/bleu.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import sys
from dataclasses import dataclass, field
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
class BleuStat(ctypes.Structure):
_fields_ = [
("reflen", ctypes.c_size_t),
("predlen", ctypes.c_size_t),
("match1", ctypes.c_size_t),
("count1", ctypes.c_size_t),
("match2", ctypes.c_size_t),
("count2", ctypes.c_size_t),
("match3", ctypes.c_size_t),
("count3", ctypes.c_size_t),
("match4", ctypes.c_size_t),
("count4", ctypes.c_size_t),
]
@dataclass
class SacrebleuConfig(FairseqDataclass):
sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
default="13a", metadata={"help": "tokenizer"}
)
sacrebleu_lowercase: bool = field(
default=False, metadata={"help": "apply lowercasing"}
)
sacrebleu_char_level: bool = field(
default=False, metadata={"help": "evaluate at character level"}
)
@register_scorer("sacrebleu", dataclass=SacrebleuConfig)
class SacrebleuScorer(BaseScorer):
def __init__(self, cfg):
super(SacrebleuScorer, self).__init__(cfg)
import sacrebleu
self.sacrebleu = sacrebleu
self.tokenizer = EvaluationTokenizer(
tokenizer_type=cfg.sacrebleu_tokenizer,
lowercase=cfg.sacrebleu_lowercase,
character_tokenization=cfg.sacrebleu_char_level,
)
def add_string(self, ref, pred):
self.ref.append(self.tokenizer.tokenize(ref))
self.pred.append(self.tokenizer.tokenize(pred))
def _score(self, order=4):
if order != 4:
raise NotImplementedError
# tokenization and lowercasing are performed by self.tokenizer instead.
return self.sacrebleu.corpus_bleu(self.pred, [self.ref], tokenize="none")
def score(self, order=4):
return self._score(order).score
def result_string(self, order=4):
return self._score(order).format()
@dataclass
class BleuConfig(FairseqDataclass):
pad: int = field(default=1, metadata={"help": "padding index"})
eos: int = field(default=2, metadata={"help": "eos index"})
unk: int = field(default=3, metadata={"help": "unk index"})
@register_scorer("bleu", dataclass=BleuConfig)
class Scorer(object):
def __init__(self, cfg):
self.stat = BleuStat()
self.pad = cfg.pad
self.eos = cfg.eos
self.unk = cfg.unk
try:
from fairseq import libbleu
except ImportError as e:
sys.stderr.write(
"ERROR: missing libbleu.so. run `pip install --editable .`\n"
)
raise e
self.C = ctypes.cdll.LoadLibrary(libbleu.__file__)
self.reset()
def reset(self, one_init=False):
if one_init:
self.C.bleu_one_init(ctypes.byref(self.stat))
else:
self.C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
if not isinstance(pred, torch.IntTensor):
raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
self.C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos),
)
def score(self, order=4):
psum = sum(
math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
)
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = "BLEU{} = {:2.2f}, {:2.1f}"
for _ in range(1, order):
fmt += "/{:2.1f}"
fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(
order,
self.score(order=order),
*bleup,
self.brevity(),
self.stat.predlen / self.stat.reflen,
self.stat.predlen,
self.stat.reflen
)
| 5,347 | 30.64497 | 88 | py |
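A hedged usage sketch of the sacreBLEU-backed scorer above (requires the ``sacrebleu`` package; the sentences are made up):

from fairseq.scoring.bleu import SacrebleuConfig, SacrebleuScorer

scorer = SacrebleuScorer(SacrebleuConfig(
    sacrebleu_tokenizer="13a",
    sacrebleu_lowercase=False,
    sacrebleu_char_level=False,
))
scorer.add_string("the cat sat on the mat", "the cat sat on a mat")
print(scorer.score())          # corpus BLEU as a float
print(scorer.result_string())  # full sacreBLEU signature string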
null | DA-Transformer-main/fairseq/scoring/chrf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
@dataclass
class ChrFScorerConfig(FairseqDataclass):
pass
@register_scorer("chrf", dataclass=ChrFScorerConfig)
class ChrFScorer(BaseScorer):
def __init__(self, args):
super(ChrFScorer, self).__init__(args)
import sacrebleu
self.sacrebleu = sacrebleu
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4):
if order != 4:
raise NotImplementedError
return self.sacrebleu.corpus_chrf(self.pred, [self.ref]).format()
| 954 | 24.810811 | 73 | py |
null | DA-Transformer-main/fairseq/scoring/meteor.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
@dataclass
class MeteorScorerConfig(FairseqDataclass):
pass
@register_scorer("meteor", dataclass=MeteorScorerConfig)
class MeteorScorer(BaseScorer):
def __init__(self, args):
super(MeteorScorer, self).__init__(args)
try:
import nltk
except ImportError:
raise ImportError("Please install nltk to use METEOR scorer")
self.nltk = nltk
self.scores = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
def score(self, order=4):
self.scores = [
self.nltk.translate.meteor_score.single_meteor_score(r, p)
for r, p in zip(self.ref, self.pred)
]
return np.mean(self.scores)
def result_string(self, order=4):
return f"METEOR: {self.score():.4f}"
| 1,164 | 26.093023 | 73 | py |
null | DA-Transformer-main/fairseq/scoring/tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unicodedata
import sacrebleu as sb
from fairseq.dataclass import ChoiceEnum
SACREBLEU_V2_ABOVE = int(sb.__version__[0]) >= 2
class EvaluationTokenizer(object):
"""A generic evaluation-time tokenizer, which leverages built-in tokenizers
in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides
lowercasing, punctuation removal and character tokenization, which are
applied after sacreBLEU tokenization.
Args:
tokenizer_type (str): the type of sacreBLEU tokenizer to apply.
lowercase (bool): lowercase the text.
punctuation_removal (bool): remove punctuation (based on unicode
category) from text.
character_tokenization (bool): tokenize the text to characters.
"""
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
_ALL_TOKENIZER_TYPES = (
sb.BLEU.TOKENIZERS
if SACREBLEU_V2_ABOVE
else ["none", "13a", "intl", "zh", "ja-mecab"]
)
ALL_TOKENIZER_TYPES = ChoiceEnum(_ALL_TOKENIZER_TYPES)
def __init__(
self,
tokenizer_type: str = "13a",
lowercase: bool = False,
punctuation_removal: bool = False,
character_tokenization: bool = False,
):
assert (
tokenizer_type in self._ALL_TOKENIZER_TYPES
), f"{tokenizer_type}, {self._ALL_TOKENIZER_TYPES}"
self.lowercase = lowercase
self.punctuation_removal = punctuation_removal
self.character_tokenization = character_tokenization
if SACREBLEU_V2_ABOVE:
self.tokenizer = sb.BLEU(tokenize=str(tokenizer_type)).tokenizer
else:
self.tokenizer = sb.tokenizers.TOKENIZERS[tokenizer_type]()
@classmethod
def remove_punctuation(cls, sent: str):
"""Remove punctuation based on Unicode category."""
return cls.SPACE.join(
t
for t in sent.split(cls.SPACE)
if not all(unicodedata.category(c)[0] == "P" for c in t)
)
def tokenize(self, sent: str):
tokenized = self.tokenizer(sent)
if self.punctuation_removal:
tokenized = self.remove_punctuation(tokenized)
if self.character_tokenization:
tokenized = self.SPACE.join(
list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE))
)
if self.lowercase:
tokenized = tokenized.lower()
return tokenized
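# --- Illustrative usage sketch (not part of the original file) ---
# The options above compose in the order implemented in tokenize(): sacreBLEU
# tokenization, then punctuation removal, then character tokenization, then
# lowercasing. A minimal example with the "13a" tokenizer:
if __name__ == "__main__":
    tok = EvaluationTokenizer(
        tokenizer_type="13a",
        lowercase=True,
        punctuation_removal=True,
    )
    print(tok.tokenize("Hello, World!"))  # -> "hello world"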
DA-Transformer-main/fairseq/scoring/wer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
@dataclass
class WerScorerConfig(FairseqDataclass):
wer_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
default="none", metadata={"help": "sacreBLEU tokenizer to use for evaluation"}
)
wer_remove_punct: bool = field(
default=False, metadata={"help": "remove punctuation"}
)
wer_char_level: bool = field(
default=False, metadata={"help": "evaluate at character level"}
)
wer_lowercase: bool = field(default=False, metadata={"help": "lowercasing"})
@register_scorer("wer", dataclass=WerScorerConfig)
class WerScorer(BaseScorer):
def __init__(self, cfg):
super().__init__(cfg)
self.reset()
try:
import editdistance as ed
except ImportError:
raise ImportError("Please install editdistance to use WER scorer")
self.ed = ed
self.tokenizer = EvaluationTokenizer(
tokenizer_type=self.cfg.wer_tokenizer,
lowercase=self.cfg.wer_lowercase,
punctuation_removal=self.cfg.wer_remove_punct,
character_tokenization=self.cfg.wer_char_level,
)
def reset(self):
self.distance = 0
self.ref_length = 0
def add_string(self, ref, pred):
ref_items = self.tokenizer.tokenize(ref).split()
pred_items = self.tokenizer.tokenize(pred).split()
self.distance += self.ed.eval(ref_items, pred_items)
self.ref_length += len(ref_items)
def result_string(self):
return f"WER: {self.score():.2f}"
def score(self):
return 100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
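# --- Illustrative usage sketch (not part of the original file) ---
# WER is accumulated as 100 * edit_distance / reference_length over all added
# pairs. This sketch assumes the editdistance package is installed; the default
# config uses the "none" sacreBLEU tokenizer, i.e. plain whitespace splitting.
if __name__ == "__main__":
    scorer = WerScorer(WerScorerConfig())
    scorer.add_string("the cat sat on the mat", "the cat sat on mat")
    print(scorer.result_string())  # one deletion over six reference words -> "WER: 16.67"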
DA-Transformer-main/fairseq/tasks/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = dc.from_namespace(cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
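# --- Illustrative sketch (not part of the original file) ---
# Registering a task together with a dataclass config, mirroring the decorator
# documented above. ExampleConfig and ExampleTask are hypothetical names, and
# the usual dataclasses/omegaconf imports (dataclass, field, MISSING) are assumed:
#
#     @dataclass
#     class ExampleConfig(FairseqDataclass):
#         data: str = field(default=MISSING, metadata={"help": "path to data"})
#
#     @register_task("example", dataclass=ExampleConfig)
#     class ExampleTask(FairseqTask):
#         @classmethod
#         def setup_task(cls, cfg: ExampleConfig, **kwargs):
#             return cls(cfg)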
def get_task(name):
return TASK_REGISTRY[name]
def import_tasks(tasks_dir, namespace):
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
import_tasks(tasks_dir, "fairseq.tasks")
DA-Transformer-main/fairseq/tasks/audio_finetuning.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import torch
import json
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional, Any
from fairseq.data import AddTargetDataset, Dictionary, encoders
from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import GenerationConfig
from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
from . import register_task
from .. import utils
from ..logging import metrics
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary):
self.dictionary = dictionary
def __call__(self, label):
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False
)
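# --- Illustrative sketch (not part of the original file) ---
# LabelEncoder turns one label line (e.g. letter targets such as "h e l l o")
# into a tensor of indices from the target dictionary, without appending EOS.
# A hypothetical use with a dictionary `tgt_dict` loaded from dict.ltr.txt:
#
#     encode = LabelEncoder(tgt_dict)
#     ids = encode("h e l l o")   # integer tensor of length 5; unknown symbols map to <unk>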
def label_len_fn(label):
return len(label.split(" "))
@dataclass
class AudioFinetuningConfig(AudioPretrainingConfig):
# Options for reporting WER metrics during validation. Only applicable to
# Seq2Seq models during fine-tuning
eval_wer: bool = field(
default=False, metadata={"help": "compute WER for Seq2Seq models"}
)
eval_wer_config: GenerationConfig = field(
default_factory=lambda: GenerationConfig(),
metadata={"help": "beam search config for evaluating wer during training"},
)
eval_wer_tokenizer: Any = field(
default=None,
metadata={"help": "tokenizer config for evaluating wer during training"},
)
eval_wer_post_process: str = field(
default="letter",
metadata={
"help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)"
},
)
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_detok: Optional[str] = field(
default=None,
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); "
"required if using --eval-bleu; use 'space' to disable "
"detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: str = field(
default="{}", metadata={"help": "args for building the tokenizer, if needed"}
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None, metadata={"help": "remove BPE before computing BLEU"}
)
eval_bleu_args: str = field(
default="{}",
metadata={
"help": "generation args for BLUE scoring, e.g., "
'\'{"beam": 4, "lenpen": 0.6}\''
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
autoregressive: bool = field(
default=False,
metadata={
"help": "required for autoregressive decoders (like seq2seq models); "
"adds 'prev_output_tokens' to input and appends eos to target"
},
)
@register_task("audio_finetuning", dataclass=AudioFinetuningConfig)
class AudioFinetuningTask(AudioPretrainingTask):
""" """
cfg: AudioFinetuningConfig
def __init__(
self,
cfg: AudioFinetuningConfig,
):
super().__init__(cfg)
self.blank_symbol = "<s>"
self.state.add_factory("target_dictionary", self.load_target_dictionary)
def load_target_dictionary(self):
if self.cfg.labels:
dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt")
return Dictionary.load(dict_path)
return None
def load_dataset(
self, split: str, task_cfg: AudioFinetuningConfig = None, **kwargs
):
super().load_dataset(split, task_cfg, **kwargs)
task_cfg = task_cfg or self.cfg
assert task_cfg.labels is not None
text_compression_level = getattr(
TextCompressionLevel, str(self.cfg.text_compression_level)
)
data_path = self.cfg.data
label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}")
skipped_indices = getattr(self.datasets[split], "skipped_indices", set())
text_compressor = TextCompressor(level=text_compression_level)
with open(label_path, "r") as f:
labels = [
text_compressor.compress(l)
for i, l in enumerate(f)
if i not in skipped_indices
]
assert len(labels) == len(self.datasets[split]), (
f"labels length ({len(labels)}) and dataset length "
f"({len(self.datasets[split])}) do not match"
)
process_label = LabelEncoder(self.target_dictionary)
self.datasets[split] = AddTargetDataset(
self.datasets[split],
labels,
pad=self.target_dictionary.pad(),
eos=self.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
label_len_fn=label_len_fn,
add_to_input=task_cfg.get("autoregressive", False),
text_compression_level=text_compression_level,
)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.state.target_dictionary
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_wer and self.cfg.autoregressive:
metrics = self._inference_with_wer(self.sequence_generator, sample, model)
logging_output["_num_char_errors"] = metrics["num_char_errors"]
logging_output["_num_chars"] = metrics["num_chars"]
logging_output["_num_word_errors"] = metrics["num_word_errors"]
logging_output["_num_words"] = metrics["num_words"]
if self.cfg.eval_bleu and self.cfg.autoregressive:
metrics = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = metrics.sys_len
logging_output["_bleu_ref_len"] = metrics.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(metrics.counts) == 4
for i in range(4):
logging_output[f"_bleu_counts_{i}"] = metrics.counts[i]
logging_output[f"_bleu_totals_{i}"] = metrics.totals[i]
return loss, sample_size, logging_output
def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(model_cfg, from_checkpoint)
if self.cfg.eval_wer and self.cfg.autoregressive:
self.sequence_generator = self.build_generator(
[model],
self.cfg.eval_wer_config,
)
if self.cfg.eval_wer_tokenizer:
self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer)
else:
self.tokenizer = None
if self.cfg.eval_bleu and self.cfg.autoregressive:
assert self.cfg.eval_bleu_detok is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
gen_args = Namespace(**gen_args)
self.sequence_generator = self.build_generator([model], gen_args)
return model
def _inference_with_wer(self, generator, sample, model):
import editdistance
def decode(toks):
s = self.target_dictionary.string(
toks.int().cpu(),
self.cfg.eval_wer_post_process,
escape_unk=True,
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
num_word_errors, num_char_errors = 0, 0
num_chars, num_words = 0, 0
gen_out = self.inference_step(generator, [model], sample, None)
for i in range(len(gen_out)):
hyp = decode(gen_out[i][0]["tokens"])
ref = decode(
utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
)
num_char_errors += editdistance.eval(hyp, ref)
num_chars += len(ref)
hyp_words = hyp.split()
ref_words = ref.split()
num_word_errors += editdistance.eval(hyp_words, ref_words)
num_words += len(ref_words)
return {
"num_char_errors": num_char_errors,
"num_chars": num_chars,
"num_word_errors": num_word_errors,
"num_words": num_words,
}
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, is_ref):
s = self.target_dictionary.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"], is_ref=False))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
is_ref=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("H-{} {}".format(sample["id"][0], hyps[0]))
logger.info("T-{} {}".format(sample["id"][0], refs[0]))
eval_tokenization = "none" if self.cfg.eval_tokenized_bleu else "13a"
return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_wer:
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
metrics.log_scalar("_num_char_errors", num_char_errors)
metrics.log_scalar("_num_chars", num_chars)
metrics.log_scalar("_num_word_errors", num_word_errors)
metrics.log_scalar("_num_words", num_words)
if num_chars > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
if num_words > 0:
metrics.log_derived(
"wer",
lambda meters: meters["_num_word_errors"].sum
* 100.0
/ meters["_num_words"].sum
if meters["_num_words"].sum > 0
else float("nan"),
)
if self.cfg.eval_bleu:
len_keys = ["_bleu_sys_len", "_bleu_ref_len"]
count_keys = [f"_bleu_counts_{i}" for i in range(4)]
total_keys = [f"_bleu_totals_{i}" for i in range(4)]
for k in len_keys + count_keys + total_keys:
metrics.log_scalar(k, sum(log.get(k, 0) for log in logging_outputs))
import sacrebleu
metrics.log_derived(
"bleu",
lambda meters: sacrebleu.compute_bleu(
correct=[meters[k].sum for k in count_keys],
total=[meters[k].sum for k in total_keys],
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
smooth_method="exp",
).score,
)
DA-Transformer-main/fairseq/tasks/audio_pretraining.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import sys
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import MISSING, II, OmegaConf
from fairseq.data import BinarizedAudioDataset, FileAudioDataset
from fairseq.dataclass import FairseqDataclass, ChoiceEnum
from fairseq.data.text_compressor import TextCompressionLevel
from . import FairseqTask, register_task
logger = logging.getLogger(__name__)
@dataclass
class InferredW2vConfig:
# The following are needed to precompute mask and mask channel indices
# before model's forward.
mask_length: Optional[int] = II("model.mask_length")
mask_prob: Optional[float] = II("model.mask_prob")
mask_selection: Optional[str] = II("model.mask_selection")
mask_other: Optional[float] = II("model.mask_other")
no_mask_overlap: Optional[bool] = II("model.no_mask_overlap")
mask_min_space: Optional[int] = II("model.mask_min_space")
mask_channel_length: Optional[int] = II("model.mask_channel_length")
mask_channel_prob: Optional[float] = II("model.mask_channel_prob")
mask_channel_selection: Optional[str] = II("model.mask_channel_selection")
mask_channel_other: Optional[float] = II("model.mask_channel_other")
no_mask_channel_overlap: Optional[bool] = II("model.no_mask_channel_overlap")
mask_channel_min_space: Optional[int] = II("model.mask_channel_min_space")
conv_feature_layers: Optional[str] = II("model.conv_feature_layers")
encoder_embed_dim: Optional[int] = II("model.encoder_embed_dim")
@dataclass
class AudioPretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
binarized_dataset: bool = field(
default=False,
metadata={
"help": "if true, loads binarized dataset (useful for very large datasets). "
"See examples/wav2vec/scripts/binarize_manifest.sh"
},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
)
enable_padding: bool = field(
default=False, metadata={"help": "pad shorter samples instead of cropping"}
)
max_sample_size: Optional[int] = field(
default=None, metadata={"help": "max sample size to crop to for batching"}
)
min_sample_size: Optional[int] = field(
default=None, metadata={"help": "min sample size to skip small examples"}
)
num_batch_buckets: int = field(
default=0,
metadata={"help": "number of buckets"},
)
precompute_mask_indices: bool = field(
default=False,
metadata={
"help": "flag to compute mask indices in data preparation.",
},
)
inferred_w2v_config: Optional[InferredW2vConfig] = field(
default=None,
metadata={
"help": "wav2vec 2.0 masking arguments used to pre-compute masks (required for TPU)",
},
)
tpu: bool = II("common.tpu")
text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field(
default="none",
metadata={
"help": "compression level for texts (e.g. audio filenames, "
"target texts): none/low/high (default: none). "
},
)
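# --- Illustrative sketch (not part of the original file) ---
# A hypothetical Hydra/YAML fragment selecting this task; the keys mirror the
# dataclass fields above and the values are made up for illustration:
#
#     task:
#       _name: audio_pretraining
#       data: /path/to/manifests
#       max_sample_size: 250000
#       min_sample_size: 32000
#       normalize: true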
@register_task("audio_pretraining", dataclass=AudioPretrainingConfig)
class AudioPretrainingTask(FairseqTask):
""" """
cfg: AudioPretrainingConfig
@classmethod
def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
return cls(cfg)
def _get_mask_precompute_kwargs(self, cfg):
if self.cfg.precompute_mask_indices or self.cfg.tpu:
assert (
cfg.inferred_w2v_config is not None
), "inferred_w2v_config must be set"
return OmegaConf.to_container(
cfg.inferred_w2v_config, resolve=True, enum_to_str=True
)
else:
return {}
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
# upgrade old task
if isinstance(task_cfg, Namespace):
if not hasattr(task_cfg, "autoregressive"):
task_cfg.autoregressive = not task_cfg.criterion == "ctc"
text_compression_level = getattr(
TextCompressionLevel, str(self.cfg.text_compression_level)
)
if getattr(task_cfg, "binarized_dataset", False):
self.datasets[split] = BinarizedAudioDataset(
data_path,
split=split,
sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate),
max_sample_size=self.cfg.max_sample_size,
min_sample_size=self.cfg.min_sample_size,
pad=task_cfg.labels is not None or task_cfg.enable_padding,
normalize=task_cfg.normalize,
num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu),
compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu),
**self._get_mask_precompute_kwargs(task_cfg),
)
else:
manifest_path = os.path.join(data_path, "{}.tsv".format(split))
self.datasets[split] = FileAudioDataset(
manifest_path=manifest_path,
sample_rate=task_cfg.get("sample_rate", self.cfg.sample_rate),
max_sample_size=self.cfg.max_sample_size,
min_sample_size=self.cfg.min_sample_size,
pad=task_cfg.labels is not None or task_cfg.enable_padding,
normalize=task_cfg.normalize,
num_buckets=self.cfg.num_batch_buckets or int(self.cfg.tpu),
compute_mask_indices=(self.cfg.precompute_mask_indices or self.cfg.tpu),
text_compression_level=text_compression_level,
**self._get_mask_precompute_kwargs(task_cfg),
)
if self.cfg.tpu and task_cfg.inferred_w2v_config.mask_channel_prob == 0.0:
logger.info(
"Pretraining on TPUs may suffer convergence "
"issues when training with `mask_channel_prob` value of "
"0. You may want to set this to a low value close to 0."
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(model_cfg, from_checkpoint)
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None:
# if "w2v_args" in actualized_cfg:
if hasattr(actualized_cfg, "w2v_args"):
model_cfg.w2v_args = actualized_cfg.w2v_args
return model
DA-Transformer-main/fairseq/tasks/cross_lingual_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
from collections import OrderedDict
import numpy as np
from fairseq import tokenizer, utils
from fairseq.data import ConcatDataset, Dictionary, TokenBlockDataset, data_utils
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("cross_lingual_lm")
class CrossLingualLMTask(LegacyFairseqTask):
"""
Task for training cross-lingual language models.
For more details look at: https://arxiv.org/pdf/1901.07291.pdf
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments" " per sample",
)
parser.add_argument(
"--monolingual-langs",
default="en",
type=str,
help="comma separated list of languages for which we"
" want to train XLM on",
)
parser.add_argument(
"--shuffle",
action="store_true",
help="shuffle each monolingual dataset while" " training",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.distributed_world_size = args.distributed_world_size
self.langs2id = self._lang_to_id(args.monolingual_langs)
def _lang_to_id(self, languages: str):
"""
Build a map from languages to ids. These ids are used as segment labels
for cross-lingual LM training.
"""
lang2id = {}
langs = [l.strip() for l in languages.split(",")]
for id, lang in enumerate(langs):
lang2id[lang] = id
return lang2id
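# Illustrative note (not part of the original file): with
# --monolingual-langs "en,fr,de" this returns {"en": 0, "fr": 1, "de": 2};
# the ids are later used as segment labels (segment_id) in MaskedLMDataset.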
@classmethod
def load_dictionary(cls, filename):
return MaskedLMDictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
d = MaskedLMDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
dictionary = MaskedLMDictionary.load(os.path.join(args.data, "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def _load_single_lang_dataset(self, split, epoch):
loaded_datasets = []
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
path = os.path.join(data_path, split_k)
ds = data_utils.load_indexed_dataset(
path, self.dictionary, self.args.dataset_impl
)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
# Since we append each block with the classification_token,
# we need to effectively create blocks of length
# tokens_per_sample-1
loaded_datasets.append(
TokenBlockDataset(
ds,
ds.sizes,
self.args.tokens_per_sample - 1,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
)
)
logger.info(
"{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1]))
)
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
return dataset, sizes
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
dataset_map = OrderedDict()
for lang in self.langs2id.keys():
# Datasets are expected to be in "split.lang" format (Eg: train.en)
language_split = "{}.{}".format(split, lang)
block_dataset, sizes = self._load_single_lang_dataset(
split=language_split, epoch=epoch
)
dataset_map[lang] = MaskedLMDataset(
dataset=block_dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.eos(),
sep_token_idx=self.dictionary.eos(),
shuffle=getattr(self.args, "shuffle", False),
has_pairs=False,
segment_id=self.langs2id[lang],
seed=self.seed,
)
self.datasets[split] = MultiCorpusSampledDataset(dataset_map)
logger.info(
"{} {} {} examples".format(
utils.split_paths(self.args.data)[(epoch - 1) % len(utils.split_paths(self.args.data))],
split,
len(self.datasets[split]),
)
)
DA-Transformer-main/fairseq/tasks/denoising.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
DenoisingDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
import numpy as np
logger = logging.getLogger(__name__)
@register_task("denoising")
class DenoisingTask(LegacyFairseqTask):
"""
Denoising task for applying sequence to sequence denoising. (ie. BART)
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments"
" per sample for dataset",
)
parser.add_argument(
"--sample-break-mode",
default="complete_doc",
type=str,
help="mode for breaking sentence",
)
parser.add_argument(
"--mask",
default=0.0,
type=float,
help="fraction of words/subwords that will be masked",
)
parser.add_argument(
"--mask-random",
default=0.0,
type=float,
help="instead of using [MASK], use random token this often",
)
parser.add_argument(
"--insert",
default=0.0,
type=float,
help="insert this percentage of additional random tokens",
)
parser.add_argument(
"--permute",
default=0.0,
type=float,
help="take this proportion of subwords and permute them",
)
parser.add_argument(
"--rotate",
default=0.5,
type=float,
help="rotate this proportion of inputs",
)
parser.add_argument(
"--poisson-lambda",
default=3.0,
type=float,
help="randomly shuffle sentences for this proportion of inputs",
)
parser.add_argument(
"--permute-sentences",
default=0.0,
type=float,
help="shuffle this proportion of sentences in all inputs",
)
parser.add_argument(
"--mask-length",
default="subword",
type=str,
choices=["subword", "word", "span-poisson"],
help="mask length to choose",
)
parser.add_argument(
"--replace-length",
default=-1,
type=int,
help="when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)",
)
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
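# --- Illustrative sketch (not part of the original file) ---
# A hypothetical BART-style pre-training command combining the flags defined
# above (data path, architecture and hyper-parameter values are made up):
#
#     fairseq-train /path/to/data-bin \
#         --task denoising --arch bart_base \
#         --mask 0.3 --mask-length span-poisson --poisson-lambda 3.5 \
#         --permute-sentences 1.0 --rotate 0.0 \
#         --tokens-per-sample 512 --sample-break-mode complete_doc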
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = StripTokenDataset(dataset, self.dictionary.eos())
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s> and one for </s>
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
document_sep_len=0,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
mask_whole_words = (
get_whole_word_mask(self.args, self.source_dictionary)
if self.args.mask_length != "subword"
else None
)
self.datasets[split] = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
)
logger.info(
"Split: {0}, Loaded {1} samples of denoising_dataset".format(
split,
len(self.datasets[split]),
)
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We assume that the input begins with a
bos symbol (`<s>`) and ends with an eos symbol (`</s>`).
"""
pad = self.source_dictionary.pad()
eos = self.source_dictionary.eos()
src_dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=self.args.tokens_per_sample - 2, # for <s> and </s>
pad=pad,
eos=eos,
break_mode=self.args.sample_break_mode,
document_sep_len=0,
)
prev_output_tokens = PrependTokenDataset(
StripTokenDataset(src_dataset, eos), eos
)
src_dataset = PadDataset(src_dataset, pad_idx=pad, left_pad=False)
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
"prev_output_tokens": PadDataset(
prev_output_tokens, pad_idx=pad, left_pad=False
),
},
"target": src_dataset,
},
sizes=[np.array(src_lengths)],
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dictionary
DA-Transformer-main/fairseq/tasks/fairseq_task.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import warnings
from argparse import Namespace
from typing import Any, Callable, Dict, List
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import Dictionary, FairseqDataset, data_utils, encoders, iterators
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim.amp_optimizer import AMPOptimizer
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
class StatefulContainer(object):
def __init__(self):
self._state = dict()
self._factories = dict()
def add_factory(self, name, factory: Callable[[], Any]):
self._factories[name] = factory
def merge_state_dict(self, state_dict: Dict[str, Any]):
self._state.update(state_dict)
@property
def state_dict(self) -> Dict[str, Any]:
return self._state
def __getattr__(self, name):
if name not in self._state and name in self._factories:
self._state[name] = self._factories[name]()
if name in self._state:
return self._state[name]
raise AttributeError(f"Task state has no factory for attribute {name}")
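# --- Illustrative sketch (not part of the original file) ---
# StatefulContainer lazily materializes attributes from registered factories;
# tasks use it (via self.state) for state that must survive checkpointing, as
# described in the FairseqTask docstring below. A toy example:
#
#     container = StatefulContainer()
#     container.add_factory("answer", lambda: 42)
#     container.answer       # -> 42, the factory runs on first access
#     container.state_dict   # -> {"answer": 42}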
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
Tasks have limited statefulness. In particular, state that needs to be
saved to/loaded from checkpoints needs to be stored in the `self.state`
:class:`StatefulContainer` object. For example::
self.state.add_factory("dictionary", self.load_dictionary)
print(self.state.dictionary) # calls self.load_dictionary()
This is necessary so that when loading checkpoints, we can properly
recreate the task state after initializing the task instance.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
def __init__(self, cfg: FairseqDataclass, **kwargs):
self.cfg = cfg
self.datasets = dict()
self.dataset_to_epoch_iter = dict()
self.state = StatefulContainer()
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
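# --- Illustrative sketch (not part of the original file) ---
# The on-disk format read by load_dictionary is one "<symbol> <count>" pair
# per line, e.g. a hypothetical dict.txt:
#
#     the 1234567
#     , 987654
#     of 876543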
@classmethod
def setup_task(cls, cfg: DictConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (omegaconf.DictConfig): parsed command-line arguments
"""
return cls(cfg, **kwargs)
def has_sharded_data(self, split):
return os.pathsep in getattr(self.cfg, "data", "")
def load_dataset(
self,
split: str,
combine: bool = False,
task_cfg: FairseqDataclass = None,
**kwargs,
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
combine (bool): combines a split segmented into pieces into one dataset
task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used
to load datasets
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def filter_indices_by_size(
self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
logger.warning(
(
"{:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def can_reuse_epoch_itr(self, dataset):
# We can reuse the epoch iterator across epochs as long as the dataset
# hasn't disabled it. We default to ``False`` here, although in practice
# this will be ``True`` for most datasets that inherit from
# ``FairseqDataset`` due to the base implementation there.
return getattr(dataset, "can_reuse_epoch_itr_across_epochs", False)
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
skip_remainder_batch (bool, optional): if set, discard the last
batch in each training epoch, as the last batch is often smaller than
local_batch_size * distributed_world_size (default: ``False``).
grouped_shuffling (bool, optional): group batches with each groups
containing num_shards batches and shuffle groups. Reduces difference
between sequence lengths among workers for batches sorted by length.
update_epoch_batch_itr (bool, optional): if true, do not reuse the cached
batch iterator for the epoch
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
can_reuse_epoch_itr = (
not disable_iterator_cache
and not update_epoch_batch_itr
and self.can_reuse_epoch_itr(dataset)
)
if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
grouped_shuffling=grouped_shuffling,
)
if can_reuse_epoch_itr:
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
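# --- Illustrative sketch (not part of the original file) ---
# Typical use of get_batch_iterator from a training loop; the argument values
# are made up and `task` stands for any concrete FairseqTask instance:
#
#     epoch_itr = task.get_batch_iterator(
#         dataset=task.dataset("train"),
#         max_tokens=4096,
#         max_positions=task.max_positions(),
#         seed=1,
#         epoch=1,
#     )
#     for sample in epoch_itr.next_epoch_itr(shuffle=True):
#         ...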
def build_model(self, cfg: FairseqDataclass, from_checkpoint=False):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
cfg (FairseqDataclass): configuration object
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(cfg, self, from_checkpoint)
model = quantization_utils.quantize_model_scalar(model, cfg)
return model
def build_criterion(self, cfg: DictConfig):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
cfg (omegaconf.DictConfig): configuration object
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(cfg, self)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
prefix_allowed_tokens_fn=None,
):
"""
Build a :class:`~fairseq.SequenceGenerator` instance for this
task.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
args (fairseq.dataclass.configs.GenerationConfig):
configuration object (dataclass) for generation
extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass
through to SequenceGenerator
prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):
If provided, this function constrains the beam search to
allowed tokens only at each step. The provided function
should take 2 arguments: the batch ID (`batch_id: int`)
and a unidimensional tensor of token ids (`inputs_ids:
torch.Tensor`). It has to return a `List[int]` with the
allowed tokens for the next generation step conditioned
on the previously generated tokens (`inputs_ids`) and
the batch ID (`batch_id`). This argument is useful for
constrained generation conditioned on the prefix, as
described in "Autoregressive Entity Retrieval"
(https://arxiv.org/abs/2010.00904) and
https://github.com/facebookresearch/GENRE.
"""
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
if prefix_allowed_tokens_fn is None:
prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(
self.target_dictionary, args.constraints
)
elif prefix_allowed_tokens_fn:
search_strategy = search.PrefixConstrainedBeamSearch(
self.target_dictionary, prefix_allowed_tokens_fn
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def optimizer_step(self, optimizer, model, update_num):
optimizer.step()
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
raise NotImplementedError
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints
)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def begin_valid_epoch(self, epoch, model):
"""Hook function called before the start of each validation epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def state_dict(self):
if self.state is not None:
return self.state.state_dict
return {}
def load_state_dict(self, state_dict: Dict[str, Any]):
if self.state is not None:
self.state.merge_state_dict(state_dict)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
def build_tokenizer(self, args):
"""Build the pre-tokenizer for this task."""
return encoders.build_tokenizer(args)
def build_bpe(self, args):
"""Build the tokenizer for this task."""
return encoders.build_bpe(args)
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
tokens = [
self.source_dictionary.encode_line(
encode_fn(src_str), add_if_not_exist=False
).long()
for src_str in lines
]
lengths = [t.numel() for t in tokens]
return tokens, lengths
class LegacyFairseqTask(FairseqTask):
def __init__(self, args: Namespace):
super().__init__(None)
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def setup_task(cls, args: Namespace, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return os.pathsep in getattr(self.args, "data", "")
def build_model(self, args: Namespace, from_checkpoint=False):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self, from_checkpoint)
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args: Namespace):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
DA-Transformer-main/fairseq/tasks/frm_text_to_speech.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.data.audio.frm_text_to_speech_dataset import FrmTextToSpeechDatasetCreator
from fairseq.tasks import register_task
from fairseq.tasks.text_to_speech import TextToSpeechTask
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
@register_task("frm_text_to_speech")
class FrmTextToSpeechTask(TextToSpeechTask):
@staticmethod
def add_args(parser):
TextToSpeechTask.add_args(parser)
parser.add_argument("--do_chunk", action="store_true", help="train on chunks")
parser.add_argument("--chunk_bound", default=-1, type=int)
parser.add_argument("--chunk_init", default=50, type=int)
parser.add_argument("--chunk_incr", default=5, type=int)
parser.add_argument("--add_eos", action="store_true")
parser.add_argument("--dedup", action="store_true")
parser.add_argument("--ref_fpu", default=-1, type=float)
def load_dataset(self, split, **unused_kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = FrmTextToSpeechDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.src_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
n_frames_per_step=self.args.n_frames_per_step,
speaker_to_id=self.speaker_to_id,
do_chunk=self.args.do_chunk,
chunk_bound=self.args.chunk_bound,
chunk_init=self.args.chunk_init,
chunk_incr=self.args.chunk_incr,
add_eos=self.args.add_eos,
dedup=self.args.dedup,
ref_fpu=self.args.ref_fpu,
)
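# Illustrative invocation sketch (not part of the original module). The data
# directory and the remaining training flags (--arch, --criterion, etc.) are
# placeholders / omitted; the flags shown map one-to-one onto the
# FrmTextToSpeechDatasetCreator.from_tsv arguments wired up in load_dataset above.
#
#     fairseq-train ${DATA_DIR} --task frm_text_to_speech \
#         --do_chunk --chunk_init 50 --chunk_incr 5 --add_eos --dedup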
# File: DA-Transformer-main/fairseq/tasks/hubert_pretraining.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
import numpy as np
from dataclasses import dataclass, field
from fairseq.data import Dictionary, HubertDataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label,
append_eos=False,
add_if_not_exist=False,
)
@dataclass
class HubertPretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
fine_tuning: bool = field(
default=False, metadata={"help": "set to true if fine-tuning Hubert"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: int = field(
default=-1,
metadata={"help": "label frame rate. -1 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={"help": "if set, normalizes input to have 0 mean and unit variance"},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_keep_size: Optional[int] = field(
default=None,
metadata={"help": "exclude sample longer than this"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to crop to for batching"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to crop to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys " "as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
@register_task("hubert_pretraining", dataclass=HubertPretrainingConfig)
class HubertPretrainingTask(FairseqTask):
cfg: HubertPretrainingConfig
def __init__(
self,
cfg: HubertPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"HubertPretrainingTask Config {cfg}")
self.cfg = cfg
self.fine_tuning = cfg.fine_tuning
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
self.blank_symbol = "<s>"
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
@classmethod
def setup_task(
cls, cfg: HubertPretrainingConfig, **kwargs
) -> "HubertPretrainingTask":
return cls(cfg)
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_dataset(self, split: str, **kwargs) -> None:
manifest = f"{self.cfg.data}/{split}.tsv"
dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
paths = [f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels]
# hubert v1: pad_audio=True, random_crop=False;
self.datasets[split] = HubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
return indices
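# Illustrative sketch (not part of the original module). Paths are
# placeholders; it assumes /data/hubert contains train.tsv / valid.tsv
# manifests plus dict.km.txt for the "km" (k-means) labels.
#
#     cfg = HubertPretrainingConfig(
#         data="/data/hubert", labels=["km"], label_rate=100, sample_rate=16000
#     )
#     task = HubertPretrainingTask.setup_task(cfg)
#     task.load_dataset("train")   # builds a HubertDataset with one LabelEncoder per label set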
# File: DA-Transformer-main/fairseq/tasks/language_modeling.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
Dictionary,
IdDataset,
LMContextWindowDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
TruncatedDictionary,
data_utils,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import LegacyFairseqTask, register_task
from omegaconf import II
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
logger = logging.getLogger(__name__)
@dataclass
class LanguageModelingConfig(FairseqDataclass):
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
output_dictionary_size: int = field(
default=-1, metadata={"help": "limit the size of output dictionary"}
)
self_target: bool = field(default=False, metadata={"help": "include self target"})
future_target: bool = field(
default=False, metadata={"help": "include future target"}
)
past_target: bool = field(default=False, metadata={"help": "include past target"})
add_bos_token: bool = field(
default=False, metadata={"help": "prepend beginning of sentence token (<s>)"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
pad_to_fixed_length: Optional[bool] = field(
default=False,
metadata={"help": "pad to fixed length"},
)
pad_to_fixed_bsz: Optional[bool] = field(
default=False,
metadata={"help": "boolean to pad to fixed batch size"},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
data_buffer_size: int = II("dataset.data_buffer_size")
tpu: bool = II("common.tpu")
use_plasma_view: bool = II("common.use_plasma_view")
plasma_path: str = II("common.plasma_path")
@register_task("language_modeling", dataclass=LanguageModelingConfig)
class LanguageModelingTask(LegacyFairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@classmethod
def setup_dictionary(cls, args, **kwargs):
dictionary = None
output_dictionary = None
if args.data:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
return (dictionary, output_dictionary)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if getattr(args, "exclude_self_target", False):
args.self_target = False
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args, from_checkpoint=False):
model = super().build_model(args, from_checkpoint)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
"Unsupported language modeling target: {}".format(target)
)
return model
def load_dataset(
self, split: str, epoch=1, combine=False, **kwargs
) -> MonolingualDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, valid1, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each process has its own copy of the raw data (likely to be an np.memmap)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(f"Dataset not found: {split} ({split_path})")
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
use_plasma_view=self.args.use_plasma_view,
split_path=split_path,
plasma_path=self.args.plasma_path,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
fixed_pad_length = None
if self.args.pad_to_fixed_length:
fixed_pad_length = self.args.tokens_per_sample
pad_to_bsz = None
if self.args.pad_to_fixed_bsz:
pad_to_bsz = (
self.args.batch_size_valid if "valid" in split else self.args.batch_size
)
self.datasets[split] = MonolingualDataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
targets=self.targets,
add_bos_token=self.args.add_bos_token,
fixed_pad_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad())
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False
),
},
sizes=[np.array(src_lengths)],
)
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the language_modeling task is not supported"
)
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
# ensures that every evaluated token has access to a context of at least
# this size, if possible
context_window: int = 0,
):
if context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=self.args.tokens_per_sample,
context_window=context_window,
pad_idx=self.source_dictionary.pad(),
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
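# Illustrative sketch (not part of the original module). It assumes `args` was
# produced by the standard language_modeling argument parser and that the data
# directory holds a binarized "valid" split; all names are placeholders.
#
#     task = LanguageModelingTask.setup_task(args)
#     task.load_dataset("valid")
#     itr = task.eval_lm_dataloader(
#         dataset=task.dataset("valid"),
#         max_tokens=4096,
#         context_window=0,   # >0 wraps the data in LMContextWindowDataset
#     )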
# File: DA-Transformer-main/fairseq/tasks/legacy_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import numpy as np
from fairseq import tokenizer, utils
from fairseq.data import ConcatDataset, Dictionary, data_utils, indexed_dataset
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import BertDictionary
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("legacy_masked_lm")
class LegacyMaskedLMTask(LegacyFairseqTask):
"""
Task for training Masked LM (BERT) model.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments"
" per sample for BERT dataset",
)
parser.add_argument(
"--break-mode", default="doc", type=str, help="mode for breaking sentence"
)
parser.add_argument("--shuffle-dataset", action="store_true", default=False)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
@classmethod
def load_dictionary(cls, filename):
return BertDictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
d = BertDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = BertDictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
loaded_datasets = []
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
logger.info("data_path", data_path)
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
path = os.path.join(data_path, split_k)
ds = indexed_dataset.make_dataset(
path,
impl=self.args.dataset_impl,
fix_lua_indexing=True,
dictionary=self.dictionary,
)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
with data_utils.numpy_seed(self.seed + k):
loaded_datasets.append(
BlockPairDataset(
ds,
self.dictionary,
ds.sizes,
self.args.tokens_per_sample,
break_mode=self.args.break_mode,
doc_break_size=1,
)
)
logger.info(
"{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1]))
)
if not combine:
break
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
self.datasets[split] = MaskedLMDataset(
dataset=dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.cls(),
sep_token_idx=self.dictionary.sep(),
shuffle=self.args.shuffle_dataset,
seed=self.seed,
)
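# Illustrative sketch (not part of the original module). File names are
# placeholders; this mirrors how build_dictionary above would be used when
# preparing data for the legacy masked LM task.
#
#     d = LegacyMaskedLMTask.build_dictionary(
#         ["corpus.part0.txt", "corpus.part1.txt"], workers=4, padding_factor=8
#     )
#     d.save("dict.txt")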
# File: DA-Transformer-main/fairseq/tasks/masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import logging
import os
from omegaconf import MISSING, II, OmegaConf
import numpy as np
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PrependTokenDataset,
RightPadDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from .language_modeling import SAMPLE_BREAK_MODE_CHOICES, SHORTEN_METHOD_CHOICES
logger = logging.getLogger(__name__)
@dataclass
class MaskedLMConfig(FairseqDataclass):
data: str = field(
default=MISSING,
metadata={
"help": "colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner"
},
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
mask_prob: float = field(
default=0.15,
metadata={"help": "probability of replacing a token with mask"},
)
leave_unmasked_prob: float = field(
default=0.1,
metadata={"help": "probability that a masked token is unmasked"},
)
random_token_prob: float = field(
default=0.1,
metadata={"help": "probability of replacing a token with a random token"},
)
freq_weighted_replacement: bool = field(
default=False,
metadata={"help": "sample random replacement words based on word frequencies"},
)
mask_whole_words: bool = field(
default=False,
metadata={"help": "mask whole words; you may also want to set --bpe"},
)
mask_multiple_length: int = field(
default=1,
metadata={"help": "repeat the mask indices multiple times"},
)
mask_stdev: float = field(
default=0.0,
metadata={"help": "stdev of the mask length"},
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
seed: int = II("common.seed")
include_target_tokens: bool = field(
default=False,
metadata={
"help": "include target tokens in model input. this is used for data2vec"
},
)
@register_task("masked_lm", dataclass=MaskedLMConfig)
class MaskedLMTask(FairseqTask):
cfg: MaskedLMConfig
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
def __init__(self, cfg: MaskedLMConfig, dictionary):
super().__init__(cfg)
self.dictionary = dictionary
# add mask token
self.mask_idx = dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, cfg: MaskedLMConfig, **kwargs):
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(cfg, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = maybe_shorten_dataset(
dataset,
split,
self.cfg.shorten_data_split_list,
self.cfg.shorten_method,
self.cfg.tokens_per_sample,
self.cfg.seed,
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.cfg.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.cfg.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
# create masked input and targets
mask_whole_words = (
get_whole_word_mask(self.args, self.source_dictionary)
if self.cfg.mask_whole_words
else None
)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.cfg.seed,
mask_prob=self.cfg.mask_prob,
leave_unmasked_prob=self.cfg.leave_unmasked_prob,
random_token_prob=self.cfg.random_token_prob,
freq_weighted_replacement=self.cfg.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
mask_multiple_length=self.cfg.mask_multiple_length,
mask_stdev=self.cfg.mask_stdev,
)
with data_utils.numpy_seed(self.cfg.seed):
shuffle = np.random.permutation(len(src_dataset))
target_dataset = RightPadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
)
input_dict = {
"src_tokens": RightPadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
}
if self.cfg.include_target_tokens:
input_dict["target_tokens"] = target_dataset
self.datasets[split] = SortDataset(
NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": input_dict,
"target": target_dataset,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_dataset, reduce=True),
},
sizes=[src_dataset.sizes],
),
sort_order=[
shuffle,
src_dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = RightPadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.cfg.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
pad_idx=self.source_dictionary.pad(),
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
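# Illustrative sketch (not part of the original module). The data path is a
# placeholder, and `seed` is passed explicitly only because its default is an
# interpolation (II("common.seed")) that resolves inside a full fairseq config.
#
#     cfg = MaskedLMConfig(
#         data="/data/wikitext-bin",
#         tokens_per_sample=512,
#         mask_prob=0.15,
#         leave_unmasked_prob=0.1,
#         random_token_prob=0.1,
#         seed=1,
#     )
#     task = MaskedLMTask.setup_task(cfg)
#     task.load_dataset("train")   # masked src_tokens plus right-padded targets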
# File: DA-Transformer-main/fairseq/tasks/multilingual_denoising.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
Dictionary,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
from .denoising import DenoisingTask
logger = logging.getLogger(__name__)
@register_task("multilingual_denoising")
class MultilingualDenoisingTask(DenoisingTask):
@staticmethod
def add_args(parser):
DenoisingTask.add_args(parser)
parser.add_argument(
"--multilang-sampling-alpha",
type=float,
default=1.0,
help="smoothing alpha for sample ratios across multiple datasets",
)
parser.add_argument("--add-lang-token", default=False, action="store_true")
parser.add_argument(
"--langs", type=str, help="language ids we are considering", default=None
)
parser.add_argument(
"--no-whole-word-mask-langs",
type=str,
default="",
metavar="N",
help="languages without spacing between words dont support whole word masking",
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
paths = args.data.split(":")
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
data_path = paths[0]
if args.langs is None:
languages = sorted(
[
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
]
)
else:
languages = args.langs.split(",")
if args.add_lang_token:
for lang in languages:
dictionary.add_symbol("[{}]".format(lang))
logger.info("dictionary: {} types".format(len(dictionary)))
if not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol("<mask>")
self.langs = args.langs
self.args = args
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob**self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(":")
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
if self.langs is None:
languages = sorted(
[
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
]
)
else:
languages = self.langs.split(",")
for name in languages:
p = os.path.join(data_path, name)
assert os.path.exists(p), "data not found: {}".format(p)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info(
"Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
)
mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
lang_datasets = []
for language in languages:
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
end_token = (
self.source_dictionary.index("[{}]".format(language))
if self.args.add_lang_token
else self.source_dictionary.eos()
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
                self.args.tokens_per_sample - 2,  # minus 2: one for <s>, one for the end/language token
pad=self.source_dictionary.pad(),
eos=end_token,
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, end_token)
lang_mask_whole_words = (
mask_whole_words
if language not in language_without_segmentations
else None
)
lang_dataset = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
lang_mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
eos=None
if not self.args.add_lang_token
else self.source_dictionary.index("[{}]".format(language)),
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
int(dataset_lengths.sum()),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language: {}".format(
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
}
)
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: {}".format(
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
}
)
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
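# Note on the splits registered above (illustrative, assuming --langs
# en_XX,ro_RO): besides the concatenated "valid" split, load_dataset also
# registers "valid_en_XX" and "valid_ro_RO" and rewrites args.valid_subset so
# that validation loss is reported per language. With --add-lang-token the
# dictionary additionally contains the symbols "[en_XX]" and "[ro_RO]", which
# are used in place of </s> as the sentence-final token for the matching language.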
# File: DA-Transformer-main/fairseq/tasks/multilingual_language_modeling.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from omegaconf import II
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
Dictionary,
IdDataset,
LMContextWindowDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
StripTokenDataset,
TokenBlockDataset,
TruncatedDictionary,
data_utils,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import LegacyFairseqTask, register_task
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
logger = logging.getLogger(__name__)
def lang_token(lang):
return f"<{lang}>"
@dataclass
class MultilingualLanguageModelingConfig(FairseqDataclass):
# TODO common var add to parent
data: Optional[str] = field(
default=None, metadata={"help": "path to data directory"}
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="none",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
output_dictionary_size: int = field(
default=-1, metadata={"help": "limit the size of output dictionary"}
)
self_target: bool = field(default=False, metadata={"help": "include self target"})
future_target: bool = field(
default=False, metadata={"help": "include future target"}
)
past_target: bool = field(default=False, metadata={"help": "include past target"})
add_bos_token: bool = field(
default=False, metadata={"help": "prepend lang id token <dialect>"}
)
max_source_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: Optional[int] = field(
default=None, metadata={"help": "max number of tokens in the target sequence"}
)
pad_to_fixed_length: Optional[bool] = field(
default=False, metadata={"help": "pad to fixed length"}
)
pad_to_fixed_bsz: Optional[bool] = field(
default=False, metadata={"help": "boolean to pad to fixed batch size"}
)
multilang_sampling_alpha: Optional[float] = field(
default=1.0,
metadata={
"help": "smoothing alpha for sample rations across multiple datasets"
},
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
langs: str = field(
default="",
metadata={
"help": "comma-separated list of languages (default: all directories in data path)"
},
)
baseline_model_langs: str = field(
default="",
metadata={
"help": "comma-separated list of languages in the baseline model (default: none)"
},
)
# TODO: legacy parameter kept for compatibility
baseline_model: str = field(
default="",
metadata={"help": "path to the baseline model (default: none)"},
)
lang_to_offline_shard_ratio: str = field(
default="",
metadata={
"help": "absolute path of tsv file location to indicate lang to offline shard ratio.",
},
)
# TODO common vars below add to parent
seed: int = II("common.seed")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
data_buffer_size: int = II("dataset.data_buffer_size")
tpu: bool = II("common.tpu")
batch_size: Optional[int] = II("dataset.batch_size")
batch_size_valid: Optional[int] = II("dataset.batch_size_valid")
train_subset: str = II("common.train_subset")
valid_subset: str = II("common.valid_subset")
@register_task(
"multilingual_language_modeling", dataclass=MultilingualLanguageModelingConfig
)
class MultilingualLanguageModelingTask(LegacyFairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@staticmethod
def _get_langs(args, epoch=1):
paths = utils.split_paths(args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
if args.langs:
keep_langs = set(args.langs.split(","))
languages = [lang for lang in languages if lang in keep_langs]
assert len(languages) == len(keep_langs)
return languages, data_path
@classmethod
def setup_dictionary(cls, args, **kwargs):
dictionary = None
output_dictionary = None
if args.data:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
if args.add_bos_token:
languages, _ = cls._get_langs(args)
logger.info("----------------")
for lang in languages:
dictionary.add_symbol(lang_token(lang))
logger.info(f"add language token: {lang_token(lang)}")
logger.info("----------------")
logger.info("dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
return (dictionary, output_dictionary)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if hasattr(args, "exclude_self_target"):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args, from_checkpoint=False):
model = super().build_model(args, from_checkpoint)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
f"Unsupported language modeling target: {target} not in {model.supported_targets}"
)
return model
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob**self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split: str, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
languages, data_path = MultilingualLanguageModelingTask._get_langs(
self.args, epoch
)
lang_to_offline_shard_ratio = None
if self.args.lang_to_offline_shard_ratio != "":
lang_to_offline_shard_ratio = {}
assert os.path.exists(
self.args.lang_to_offline_shard_ratio
), "provided offline shard ratio file doesn't exist: {0}".format(
self.args.lang_to_offline_shard_ratio
)
with open(self.args.lang_to_offline_shard_ratio) as fin:
for line in fin:
lang, ratio = line.strip().split("\t")
ratio = float(ratio)
lang_to_offline_shard_ratio[lang] = ratio
logger.info(
"Found offline sharded ratio: %s",
lang_to_offline_shard_ratio,
)
if split == self.args.train_subset:
logger.info(
"Training on {0} languages: {1}".format(len(languages), languages)
)
else:
logger.info(
"Evaluating on {0} languages: {1}".format(len(languages), languages)
)
tokens_per_sample = self.args.tokens_per_sample - int(self.args.add_bos_token)
fixed_pad_length = None
if self.args.pad_to_fixed_length:
fixed_pad_length = self.args.tokens_per_sample
pad_to_bsz = None
if self.args.pad_to_fixed_bsz:
pad_to_bsz = (
self.args.batch_size_valid if "valid" in split else self.args.batch_size
)
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
# print('len(dataset) =', len(dataset))
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
src_lang_idx, tgt_lang_idx = None, None
if self.args.add_bos_token:
src_lang_idx = self.dictionary.index(lang_token(language))
tgt_lang_idx = self.output_dictionary.index(lang_token(language))
lang_datasets.append(
MonolingualDataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
targets=self.targets,
fixed_pad_length=fixed_pad_length,
pad_to_bsz=pad_to_bsz,
add_bos_token=self.args.add_bos_token,
src_lang_idx=src_lang_idx,
tgt_lang_idx=tgt_lang_idx,
)
)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
dataset_lengths_ratio_multiplier = np.ones(len(dataset_lengths))
if lang_to_offline_shard_ratio is not None:
dataset_lengths_ratio_multiplier = []
for lang in languages:
assert (
lang in lang_to_offline_shard_ratio
), "Lang: {0} missing in offline shard ratio file: {1}".format(
lang,
self.args.lang_to_offline_shard_ratio,
)
dataset_lengths_ratio_multiplier.append(
lang_to_offline_shard_ratio[lang]
)
dataset_lengths_ratio_multiplier = np.array(
dataset_lengths_ratio_multiplier
)
true_dataset_lengths = (
dataset_lengths * dataset_lengths_ratio_multiplier
)
else:
true_dataset_lengths = dataset_lengths
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(true_dataset_lengths)
logger.info(
"Sample probability by language: %s",
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
},
)
size_ratio = (sample_probs * true_dataset_lengths.sum()) / dataset_lengths
# TODO: add an option for shrinking all size ratios to below 1
# if self.args.multilang_sampling_alpha != 1:
# size_ratio /= size_ratio.max()
# Fix numeric errors in size ratio computation
# 0.999999999999999999 -> 1
# 1.000000000000000002 -> 1
for i in range(len(size_ratio)):
size_ratio[i] = round(size_ratio[i], 8)
logger.info(
"Up/Down Sampling ratio by language: %s",
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
},
)
logger.info(
"Actual dataset size by language: %s",
{
lang: "{0:.2f}".format(len(lang_datasets[id]))
for id, lang in enumerate(languages)
},
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] > 1.0,
)
for i, d in enumerate(lang_datasets)
]
logger.info(
"Resampled dataset size by language: %s",
{
lang: "{0:.2f}".format(len(resampled_lang_datasets[id]))
for id, lang in enumerate(languages)
},
)
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(
self, src_tokens, src_lengths, language="en_XX", **kwargs
):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_lang_idx = self.dictionary.index(lang_token(language))
src_dataset = PrependTokenDataset(
dataset,
token=(
(src_lang_idx or self.source_dictionary.bos())
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
max_seq_len = max(src_lengths) + 1
tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad())
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
pad_length=max_seq_len,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
pad_length=max_seq_len,
),
},
sizes=[np.array(src_lengths)],
)
@torch.no_grad()
def inference_step(
self,
generator,
models,
sample,
language="en_XX",
prefix_tokens=None,
constraints=None,
):
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
src_lang_idx = self.dictionary.index(lang_token(language))
bos_token = src_lang_idx or self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the language_modeling task is not supported"
)
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
# ensures that every evaluated token has access to a context of at least
# this size, if possible
context_window: int = 0,
):
if context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=self.args.tokens_per_sample,
context_window=context_window,
pad_idx=self.source_dictionary.pad(),
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
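# Illustrative sketch (not part of the original module). It assumes the task
# was set up with --add-bos-token and that "en_XX" is one of the configured
# languages, so lang_token("en_XX") == "<en_XX>" is already in the dictionary;
# `generator`, `models`, and the tensors below are placeholders.
#
#     batch = task.build_dataset_for_inference(src_tokens, src_lengths, language="en_XX")
#     hypos = task.inference_step(generator, models, sample, language="en_XX")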
# File: DA-Transformer-main/fairseq/tasks/multilingual_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
ConcatDataset,
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
RawLabelDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("multilingual_masked_lm")
class MultiLingualMaskedLMTask(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--sample-break-mode",
default="complete",
choices=["none", "complete", "complete_doc", "eos"],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.',
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments "
"per sample for BERT dataset",
)
parser.add_argument(
"--mask-prob",
default=0.15,
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--leave-unmasked-prob",
default=0.1,
type=float,
help="probability that a masked token is unmasked",
)
parser.add_argument(
"--random-token-prob",
default=0.1,
type=float,
help="probability of replacing a token with a random token",
)
parser.add_argument(
"--freq-weighted-replacement",
action="store_true",
help="sample random replacement words based on word frequencies",
)
parser.add_argument(
"--mask-whole-words",
default=False,
action="store_true",
help="mask whole words; you may also want to set --bpe",
)
parser.add_argument(
"--multilang-sampling-alpha",
type=float,
default=1.0,
help="smoothing alpha for sample rations across multiple datasets",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
bpe = encoders.build_bpe(self.args)
if bpe is not None:
def is_beginning_of_word(i):
if i < self.source_dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = self.source_dictionary[i]
if tok.startswith("madeupword"):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(self.source_dictionary))))
)
else:
mask_whole_words = None
return mask_whole_words
def _get_sample_prob(self, dataset_lens):
"""
        Get smoothed sampling probability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob**self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info(
"Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
)
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_dataset, reduce=True),
"lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language: ",
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
},
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: ",
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
},
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = PadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
pad_idx=self.source_dictionary.pad(),
left_pad=False,
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
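# NOTE: illustrative sketch, not part of the original fairseq file; the sizes and the
# `_demo_` name are made up. It mirrors the arithmetic of _get_sample_prob above to
# show how an alpha < 1.0 upsamples a low-resource language relative to its raw share.
def _demo_multilang_sampling(alpha=0.7):
    dataset_lens = np.array([1_000_000.0, 10_000.0])  # high- vs. low-resource corpus
    prob = dataset_lens / dataset_lens.sum()  # raw shares: ~[0.990, 0.010]
    smoothed_prob = prob**alpha
    smoothed_prob = smoothed_prob / smoothed_prob.sum()  # ~[0.962, 0.038]
    return prob, smoothed_prob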
# File: DA-Transformer-main/fairseq/tasks/multilingual_translation.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
from collections import OrderedDict
from argparse import ArgumentError
import torch
from fairseq import metrics, options, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return "__{}__".format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang)
return idx
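# NOTE: illustrative sketch, not part of the original fairseq file; the `_demo_` name
# and the language codes are arbitrary. It shows how the "__lang__" tokens above are
# registered in a Dictionary and resolved back to indices by _lang_token_index.
def _demo_lang_token_lookup(langs=("en", "de", "fr")):
    dictionary = Dictionary()
    for lang in langs:
        dictionary.add_symbol(_lang_token(lang))
    # every language now has its own (non-<unk>) index that can replace bos/eos
    return {lang: _lang_token_index(dictionary, lang) for lang in langs}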
@register_task("multilingual_translation")
class MultilingualTranslationTask(LegacyFairseqTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicates the inference language direction.
`--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
the same value as training.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', metavar='DIR', help='path to data directory')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language (only needed for inference)')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language (only needed for inference)')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left (default: False)')
try:
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
except ArgumentError:
# this might have already been defined. Once we transition this to hydra it should be fine to add it here.
pass
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
metavar='SRCTGT',
help='replace beginning-of-sentence in source sentence with source or target '
'language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true',
help='replace beginning-of-sentence in target sentence with target language token')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args)
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
# lang_pairs. However for other multitask settings or when we want to
# optimize for certain languages we want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
# models.build_model(). This allows multitask type of sub-class can
# build models other than the input lang_pairs
self.model_lang_pairs = self.lang_pairs
self.langs = list(dicts.keys())
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = cls.prepare(args, **kwargs)
return cls(args, dicts, training)
@classmethod
def update_args(cls, args):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if args.lang_pairs is None:
raise ValueError(
"--lang-pairs is required. List all the language pairs in the training objective."
)
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(",")
@classmethod
def prepare(cls, args, **kargs):
cls.update_args(args)
sorted_langs = sorted(
list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")})
)
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dicts[lang] = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(lang))
)
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang])))
return dicts, training
def get_encoder_langtok(self, src_lang, tgt_lang):
if self.args.encoder_langtok is None:
return self.dicts[src_lang].eos()
if self.args.encoder_langtok == "src":
return _lang_token_index(self.dicts[src_lang], src_lang)
else:
return _lang_token_index(self.dicts[src_lang], tgt_lang)
def get_decoder_langtok(self, tgt_lang):
if not self.args.decoder_langtok:
return self.dicts[tgt_lang].eos()
return _lang_token_index(self.dicts[tgt_lang], tgt_lang)
def alter_dataset_langtok(
self,
lang_pair_dataset,
src_eos=None,
src_lang=None,
tgt_eos=None,
tgt_lang=None,
):
if self.args.encoder_langtok is None and not self.args.decoder_langtok:
return lang_pair_dataset
new_src_eos = None
if (
self.args.encoder_langtok is not None
and src_eos is not None
and src_lang is not None
and tgt_lang is not None
):
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
else:
src_eos = None
new_tgt_bos = None
if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split("-")
langpair_dataset = load_langpair_dataset(
data_path,
split,
src,
self.dicts[src],
tgt,
self.dicts[tgt],
combine=True,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
return self.alter_dataset_langtok(
langpair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict(
[
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in self.lang_pairs
]
),
eval_key=None
if self.training
else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the multilingual_translation task is not supported"
)
lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
return RoundRobinZipDatasets(
OrderedDict(
[
(
lang_pair,
self.alter_dataset_langtok(
LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary
),
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
),
)
]
),
eval_key=lang_pair,
)
def build_model(self, args, from_checkpoint=False):
def check_args():
messages = []
if (
len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs))
!= 0
):
messages.append(
"--lang-pairs should include all the language pairs {}.".format(
args.lang_pairs
)
)
if self.args.encoder_langtok != args.encoder_langtok:
messages.append(
"--encoder-langtok should be {}.".format(args.encoder_langtok)
)
if self.args.decoder_langtok != args.decoder_langtok:
messages.append(
"--decoder-langtok should {} be set.".format(
"" if args.decoder_langtok else "not"
)
)
if len(messages) > 0:
raise ValueError(" ".join(messages))
# Update args -> the fact that the constructor here
# changes the args object doesn't mean you get the same one here
self.update_args(args)
        # Check if task args are consistent with model args
check_args()
from fairseq import models
model = models.build_model(args, self, from_checkpoint)
if not isinstance(model, FairseqMultiModel):
raise ValueError(
"MultilingualTranslationTask requires a FairseqMultiModel architecture"
)
return model
def _per_lang_pair_train_loss(
self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad
):
loss, sample_size, logging_output = criterion(
model.models[lang_pair], sample[lang_pair]
)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
curr_lang_pairs = [
lang_pair
for lang_pair in self.model_lang_pairs
if sample[lang_pair] is not None and len(sample[lang_pair]) != 0
]
for idx, lang_pair in enumerate(curr_lang_pairs):
def maybe_no_sync():
if (
self.args.distributed_world_size > 1
and hasattr(model, "no_sync")
and idx < len(curr_lang_pairs) - 1
):
return model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
with maybe_no_sync():
loss, sample_size, logging_output = self._per_lang_pair_train_loss(
lang_pair,
model,
update_num,
criterion,
sample,
optimizer,
ignore_grad,
)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
return criterion(model.models[lang_pair], sample[lang_pair])
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
for lang_pair in self.eval_lang_pairs:
if (
lang_pair not in sample
or sample[lang_pair] is None
or len(sample[lang_pair]) == 0
):
continue
loss, sample_size, logging_output = self._per_lang_pair_valid_loss(
lang_pair, model, criterion, sample
)
agg_loss += loss.data.item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if self.args.decoder_langtok:
bos_token = _lang_token_index(
self.target_dictionary, self.args.target_lang
)
else:
bos_token = self.target_dictionary.eos()
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=bos_token,
)
def reduce_metrics(self, logging_outputs, criterion):
with metrics.aggregate():
# pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
super().reduce_metrics(logging_outputs, criterion)
for k in ["sample_size", "nsentences", "ntokens"]:
metrics.log_scalar(k, sum(l[k] for l in logging_outputs))
@property
def source_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.target_lang]
def max_positions(self):
"""Return the max sentence length allowed by the task."""
if len(self.datasets.values()) == 0:
return {
"%s-%s"
% (self.args.source_lang, self.args.target_lang): (
self.args.max_source_positions,
self.args.max_target_positions,
)
}
return OrderedDict(
[
(key, (self.args.max_source_positions, self.args.max_target_positions))
for split in self.datasets.keys()
for key in self.datasets[split].datasets.keys()
]
)
# File: DA-Transformer-main/fairseq/tasks/online_backtranslation.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import logging
import math
import os
from argparse import Namespace
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Dict, Sequence, Tuple
from argparse import ArgumentError
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq
from fairseq import metrics, options, utils
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
NoisingDataset,
PrependTokenDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
data_utils,
encoders,
)
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
logger = logging.getLogger(__name__)
class PiecewiseLinearFn:
"""Piecewise linear function. Can be configured with a string."""
def __init__(self, pieces: Sequence[Tuple[int, float]]):
assert pieces == sorted(
pieces
), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"
self.pieces = pieces
def __call__(self, x: int) -> float:
for i, (x_a, y_a) in enumerate(self.pieces[:-1]):
x_b, y_b = self.pieces[i + 1]
if x_a <= x <= x_b:
return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
return self.pieces[-1][1]
@staticmethod
def from_string(configuration: str) -> "PiecewiseLinearFn":
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
if isinstance(configuration, float):
return PiecewiseLinearFn([(0, configuration)])
try:
parts = configuration.split(",")
if len(parts) == 1:
v = float(configuration)
return PiecewiseLinearFn([(0, v)])
split = [s.split(":") for s in parts]
pieces = [(int(t), float(v)) for t, v in split]
return PiecewiseLinearFn(pieces)
except Exception:
raise ValueError(
f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
)
@staticmethod
def one() -> "PiecewiseLinearFn":
return PiecewiseLinearFn([(0, 1.0)])
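# NOTE: illustrative sketch, not part of the original fairseq file; the `_demo_` name
# is made up. It exercises the schedule strings documented in from_string above:
# a constant weight, a linear decay, and a delayed linear warm-up.
def _demo_piecewise_linear_schedules():
    constant = PiecewiseLinearFn.from_string("3")
    decay = PiecewiseLinearFn.from_string("0:1,1000:0")
    warmup = PiecewiseLinearFn.from_string("0:0,1000:0,2000:1")
    assert constant(500) == 3.0
    assert decay(500) == 0.5
    assert warmup(500) == 0.0 and warmup(1500) == 0.5
    return constant, decay, warmup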
@register_task("online_backtranslation")
class OnlineBackTranslationTask(TranslationTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
# Generic translation args
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('--mono-langs', metavar='MONO_LANGS',
help='monolingual languages for training')
parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
help='language pairs for validation')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
try:
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
except ArgumentError:
# this might have already been defined. Once we transition this to hydra it should be fine to add it here.
pass
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# Denoising args
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# Backtranslation args
parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
help='back-translation weight')
parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
help='denoising auto-encoder weight')
# Evaluation args
parser.add_argument('--generate-one-by-one', action='store_true',
help='generate one sentence at a time for backtranslation')
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
super().__init__(args, common_dict, common_dict)
self.common_dict = common_dict
self.mono_langs = mono_langs
self.valid_lang_pairs = valid_lang_pairs
self.SHOW_SAMPLES_INTERVAL = 1000
# Start by showing samples
self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
self.SHOW_SAMPLES_NUMBER = 5
self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)
self.args = args
self.data = utils.split_paths(self.args.data)
if len(self.data) == 1:
shards = list(Path(self.data[0]).glob("shard*"))
if len(shards) > 0:
# keep this as strings, since it can also be a manifold path
old_data = self.data
self.data = [str(shard) for shard in shards]
logging.warning(f"Expanded data directory {old_data} to {self.data}")
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
assert args.mono_langs is not None
mono_langs = args.mono_langs.split(",")
valid_lang_pairs = args.valid_lang_pairs.split(",")
# load dictionary
dict_path = os.path.join(paths[0], "dict.txt")
common_dict = cls.load_dictionary(dict_path)
return cls(args, common_dict, mono_langs, valid_lang_pairs)
def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split == "train":
data_path = self.data[(epoch - 1) % len(self.data)]
dataset = self.load_train_dataset(data_path)
else:
# valid/test should always be the same.
dataset = self.load_translation_dataset(split, self.data[0])
self.datasets[split] = dataset
return dataset
def load_train_dataset(self, data_path: str) -> FairseqDataset:
"""The training dataset is made of backtranslation dataset and denoising dataset."""
data = []
for lang in self.mono_langs:
train_path = os.path.join(data_path, lang, "train")
# TODO: could we do the BT using denoise sample ?
# this would half the data loading work
data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
data.append(
(f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
)
return RoundRobinZipDatasets(OrderedDict(data))
def _langpair_dataset(
self, src: FairseqDataset, tgt: FairseqDataset
) -> LanguagePairDataset:
return LanguagePairDataset(
src,
src.sizes,
self.dictionary,
tgt=tgt,
tgt_sizes=tgt.sizes,
tgt_dict=self.dictionary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
# TODO: should we shuffle ? we are already sorting batch by sizes so ?
# shuffle=True,
)
def _prepend_lang_bos_to_target(
self, dataset: LanguagePairDataset, lang: str
) -> LanguagePairDataset:
bos = _lang_token_index(self.dictionary, lang)
return TransformEosLangPairDataset(
dataset,
src_eos=self.dictionary.eos(),
new_src_eos=self.dictionary.eos(),
tgt_bos=self.dictionary.eos(),
new_tgt_bos=bos,
)
def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""The BT dataset is generated with (tgt, tgt) pairs.
The actual translation to a (generated_src, tgt) pair
is done on the fly during training.
"""
mono_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
assert mono_dataset is not None, f"No dataset found for {lang}"
mono_dataset_src = PrependTokenDataset(
mono_dataset, _lang_token_index(self.dictionary, lang)
)
mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
logger.info(
f"mono_lang = {lang} "
f"lang token index = {_lang_token_index(self.dictionary, lang)} "
f"lang token = {_lang_token(lang)}"
)
mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
return mono_dataset_bt
def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""Classic denoising dataset"""
dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
noisy_dataset = NoisingDataset(
dataset,
self.dictionary,
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noisy_dataset = PrependTokenDataset(
noisy_dataset, _lang_token_index(self.dictionary, lang)
)
clean_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)
return denoising_dataset
def load_translation_dataset(
self, split: str, data_path: str, combine: bool = False
):
# only judging with one language pair for the moment,
# since ConcatDataset doesn't work as expected
assert len(self.valid_lang_pairs) == 1, "For now..."
valid_lang_pair = self.valid_lang_pairs[0]
src, tgt = valid_lang_pair.split("-")
        # use the same function as TranslationTask
src_tgt_dt = load_langpair_dataset(
data_path,
split,
src,
self.common_dict,
tgt,
self.common_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
prepend_bos_src=_lang_token_index(self.dictionary, src),
)
src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
src_tgt_eos_dt.args = self.args
return src_tgt_eos_dt
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
raise NotImplementedError
def build_model(self, args, from_checkpoint=False):
# torch.autograd.set_detect_anomaly(True)
model = super().build_model(args, from_checkpoint)
        add_special_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)
self.sequence_generators = {}
for mono_lang in self.mono_langs:
self.sequence_generators[mono_lang] = SequenceGenerator(
[model],
tgt_dict=self.dictionary,
beam_size=1,
max_len_a=1.3,
max_len_b=5,
min_len=5,
# keep 1 to be able to prepend bos
max_len=model.max_decoder_positions() - 1,
)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.bleu_sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.common_dict
def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
self._show_samples_ctr += 1
if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
return
self._show_samples_ctr = 0
ln = smp["net_input"]["src_tokens"].shape[0]
logger.info(
f"(r:{self.args.distributed_rank}) : "
f"{other_lang} ---> {mono_lang} "
f"({other_lang} was generated by back-translation.) {ln} samples"
)
for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
src_tokens = smp["net_input"]["src_tokens"][i]
tgt_tokens = smp["target"][i]
src_str = self.dictionary.string(src_tokens, "sentencepiece")
tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
logger.info(
f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
f"\t\t[{mono_lang} original ] {tgt_str}\n"
f"\t\t[ src tokens] {src_tokens}\n"
)
def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
"""
* WARNING: smp is modified in place.
* At the start of this function, `smp` has the same input and target:
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (from data) __en__ hello world | __en__ hello world |
|--------------------------------------------------------|
* We call generator.generate(smp, bos_token = token("ro")),
and copy the result as input
* At the end, `smp` has the translation to other language.
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (generated) __ro__ salut lume | __en__ hello world |
|--------------------------------------------------------|
"""
bos_token = _lang_token_index(self.dictionary, other_lang)
generated = self.sequence_generators[orig_lang].generate(
models=[], sample=smp, bos_token=bos_token
)
        max_length = max([gn[0]["tokens"].size(0) for gn in generated])
net_input = smp["net_input"]
n_src_tokens = torch.empty(
            size=(len(generated), max_length + 1), dtype=net_input["src_tokens"].dtype
)
n_src_lengths = torch.empty(
len(generated), dtype=net_input["src_lengths"].dtype
)
for i, gn in enumerate(generated):
tokens = gn[0]["tokens"]
tokens_size = tokens.size(0)
            padding_needed = max_length - tokens_size
tokens = torch.cat([tokens.new([bos_token]), tokens])
tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
n_src_tokens[i] = tokens
n_src_lengths[i] = tokens_size + 1
device = net_input["src_tokens"].device
# This seems to be important
del net_input["src_tokens"]
del net_input["src_lengths"]
net_input["src_tokens"] = n_src_tokens.to(device)
net_input["src_lengths"] = n_src_lengths.to(device)
def generate(self, smp, model):
model.eval()
orig_lang = (
self.dictionary[smp["net_input"]["src_tokens"][0][0]]
.replace(" ", "")
.replace("_", "")
)
bos_token = smp["net_input"]["prev_output_tokens"][0][0]
with torch.no_grad():
generated = self.sequence_generators[orig_lang].generate(
models=[model], sample=smp, bos_token=bos_token
)
return generated
def get_other_lang(self, lang):
# TODO: allow more complex mapping
if lang != self.mono_langs[0]:
return self.mono_langs[0]
if len(self.mono_langs) == 2:
return self.mono_langs[1]
return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
agg_loss, agg_sample_size = 0.0, 0.0
agg_logging_output: Dict[str, float] = defaultdict(float)
dataset_keys = self.datasets["train"].datasets.keys()
weights = {
"BT": self.lambda_bt(update_num),
"DENOISE": self.lambda_dae(update_num),
}
log_keys = {"BT": "bt_", "DENOISE": "dae_"}
for dataset_key in dataset_keys:
smp = sample[dataset_key]
mono_lang, task_subtype = dataset_key.split("-")
if weights[task_subtype] == 0:
continue
if task_subtype == "BT":
with torch.autograd.profiler.record_function("backtranslation"):
model.eval()
                    # TODO: could we translate to several languages at once?
                    # This would allow sharing encoder_out and maximizing GPU usage.
other_lang = self.get_other_lang(mono_lang)
self.backtranslate_sample(smp, mono_lang, other_lang)
self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
model.train()
# Like in FairseqTask.train_step
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, smp)
loss *= weights[task_subtype]
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
agg_loss += loss.item()
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
agg_logging_output[k] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def get_bos_token_from_sample(self, sample):
net_input = sample["net_input"]
source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
target_lang_token_id = _lang_token_index(
self.dictionary, self.get_other_lang(source_lang_token)
)
return target_lang_token_id
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
if bt_sample_size:
bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
bt_loss_sum *= 1 / bt_sample_size / math.log(2)
metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)
bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
metrics.log_derived(
"bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
)
dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
if dae_sample_size:
dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
dae_loss_sum *= 1 / dae_sample_size / math.log(2)
metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)
dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
metrics.log_derived(
"dae_ppl",
lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
)
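# NOTE: illustrative sketch, not part of the original fairseq file; the token ids,
# bos id, pad id and the `_demo_` name are made up. It reproduces the cat/pad
# mechanics used by OnlineBackTranslationTask.backtranslate_sample above on a toy
# batch of generated hypotheses of different lengths.
def _demo_pad_backtranslated_batch(bos_token=5, pad_token=1):
    generated = [torch.tensor([10, 11, 12, 2]), torch.tensor([20, 2])]
    max_length = max(tokens.size(0) for tokens in generated)
    batch = torch.empty((len(generated), max_length + 1), dtype=torch.long)
    for i, tokens in enumerate(generated):
        tokens = torch.cat([tokens.new([bos_token]), tokens])
        batch[i] = F.pad(tokens, (0, max_length + 1 - tokens.size(0)), value=pad_token)
    return batch  # tensor([[ 5, 10, 11, 12,  2], [ 5, 20,  2,  1,  1]])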
@torch.no_grad()
def extend_embedding(
emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
) -> None:
old_emb_data = emb.weight.data
(old_vocab_size, dim) = old_emb_data.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
emb.weight.data = torch.zeros((new_vocab_size, dim))
emb.weight.data[:old_vocab_size, :] = old_emb_data
# initialize new embeddings
emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]
if hasattr(emb, "num_embeddings"):
emb.num_embeddings = new_vocab_size
if hasattr(emb, "out_features"):
emb.out_features = new_vocab_size
if getattr(emb, "bias", None) is None:
return
# Fix the bias.
# Bias shape can be different from the previous vocab size
    # if the weight matrix was shared and already extended but not the bias.
(old_vocab_size,) = emb.bias.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
old_bias = emb.bias.data
new_bias = torch.zeros(
(new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device
)
new_bias[:old_vocab_size] = old_bias
emb.bias.data = new_bias
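# NOTE: illustrative sketch, not part of the original fairseq file; the `_demo_` name
# and sizes are made up. It applies extend_embedding above to a toy embedding table:
# the new rows are initialised from the row given by copy_from_token_id (bos in the
# real task), so freshly added language tokens start from a sensible embedding.
def _demo_extend_embedding():
    emb = nn.Embedding(num_embeddings=4, embedding_dim=8)
    extend_embedding(emb, new_vocab_size=6, copy_from_token_id=0)
    assert emb.weight.shape == (6, 8)
    assert torch.equal(emb.weight[4], emb.weight[0])
    return emb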
def add_special_tokens_to_dict_and_model(
dictionary: "fairseq.data.Dictionary",
model: nn.Module,
mono_langs: Sequence[str],
) -> None:
embs = model.encoder.embed_tokens
vocab_size, embedding_dim = embs.weight.shape
# The model may or may not have a '<mask>' embedding yet
assert (
len(dictionary) <= vocab_size <= len(dictionary) + 1
), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
# TODO: we should reuse the pretrained model dict which already has <mask>
dictionary.add_symbol("<mask>")
for lang in mono_langs:
lang_token = _lang_token(lang)
dictionary.add_symbol(lang_token)
logger.info(
f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
f"after adding {len(mono_langs)} lang tokens."
)
if len(dictionary) <= vocab_size:
return
extend_embedding(embs, len(dictionary), dictionary.bos())
dec_embs = model.decoder.embed_tokens
extend_embedding(dec_embs, len(dictionary), dictionary.bos())
lm_head = model.decoder.output_projection
extend_embedding(lm_head, len(dictionary), dictionary.bos())
assert lm_head.weight.shape == (len(dictionary), embedding_dim)
def _lang_token(lang: str) -> str:
return f"__{lang}__"
def _lang_token_index(dictionary, lang: str) -> int:
return dictionary.index(_lang_token(lang))
@contextlib.contextmanager
def assert_weights_have_changed(model: nn.Module):
def checksum(model: nn.Module) -> float:
return sum(p.sum().item() for p in model.parameters())
initial_checksum = checksum(model)
yield model
final_checksum = checksum(model)
logger.info(
f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
)
assert initial_checksum != final_checksum, "Model hasn't changed !"
# File: DA-Transformer-main/fairseq/tasks/semisupervised_translation.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from collections import OrderedDict, defaultdict
from fairseq import utils
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
data_utils,
indexed_dataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from . import register_task
from .multilingual_translation import MultilingualTranslationTask
logger = logging.getLogger(__name__)
def _get_bt_dataset_key(lang_pair):
return "bt:" + lang_pair
def _get_denoising_dataset_key(lang_pair):
return "denoising:" + lang_pair
# ported from UnsupervisedMT
def parse_lambda_config(x):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
split = x.split(",")
if len(split) == 1:
return float(x), None
else:
        split = [s.split(":") for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(
int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1)
)
return float(split[0][1]), [(int(k), float(v)) for k, v in split]
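# NOTE: illustrative sketch, not part of the original fairseq file; the `_demo_` name
# is made up. It shows the two return shapes of parse_lambda_config above: a constant
# weight with no schedule, and an initial weight plus a (step, weight) schedule that
# update_step later interpolates linearly.
def _demo_parse_lambda_config():
    constant, no_schedule = parse_lambda_config("3")
    assert constant == 3.0 and no_schedule is None
    initial, schedule = parse_lambda_config("0:1,1000:0")
    assert initial == 1.0 and schedule == [(0, 1.0), (1000, 0.0)]
    return constant, schedule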
@register_task("semisupervised_translation")
class SemisupervisedTranslationTask(MultilingualTranslationTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
`--target-lang`, instead of `--lang-pairs`.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (parallel data). '
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG',
help='Cross-entropy reconstruction coefficient (denoising autoencoding)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
                                 'weight with the format: step0:w0,step1:w1,...')
parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N',
help='beam size used in beam search of online back-translation')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(
args.lambda_parallel_config
)
self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(
args.lambda_otf_bt_config
)
self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(
args.lambda_denoising_config
)
if self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None:
denoising_lang_pairs = [
"%s-%s" % (tgt, tgt)
for tgt in {lang_pair.split("-")[1] for lang_pair in args.lang_pairs}
]
self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs
self.backtranslate_datasets = {}
self.backtranslators = {}
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = MultilingualTranslationTask.prepare(args, **kwargs)
return cls(args, dicts, training)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def split_exists(split, src, tgt, lang):
if src is not None:
filename = os.path.join(
data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)
)
else:
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, src, tgt)
)
return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)
def load_indexed_dataset(path, dictionary):
return data_utils.load_indexed_dataset(
path, dictionary, self.args.dataset_impl
)
# load parallel datasets
src_datasets, tgt_datasets = {}, {}
if (
self.lambda_parallel > 0.0
or self.lambda_parallel_steps is not None
or not split.startswith("train")
):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if split_exists(split, src, tgt, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, src, tgt)
)
elif split_exists(split, tgt, src, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, tgt, src)
)
else:
continue
src_datasets[lang_pair] = load_indexed_dataset(
prefix + src, self.dicts[src]
)
tgt_datasets[lang_pair] = load_indexed_dataset(
prefix + tgt, self.dicts[tgt]
)
logger.info(
"parallel-{} {} {} examples".format(
data_path, split, len(src_datasets[lang_pair])
)
)
if len(src_datasets) == 0:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
# back translation datasets
backtranslate_datasets = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
raise FileNotFoundError(
"Dataset not found: backtranslation {} ({})".format(
split, data_path
)
)
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
dataset = load_indexed_dataset(filename, self.dicts[tgt])
lang_pair_dataset_tgt = LanguagePairDataset(
dataset,
dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
lang_pair_dataset = LanguagePairDataset(
dataset,
dataset.sizes,
src_dict=self.dicts[src],
tgt=dataset,
tgt_sizes=dataset.sizes,
tgt_dict=self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
backtranslate_datasets[lang_pair] = BacktranslationDataset(
tgt_dataset=self.alter_dataset_langtok(
lang_pair_dataset_tgt,
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_lang=src,
),
backtranslation_fn=self.backtranslators[lang_pair],
src_dict=self.dicts[src],
tgt_dict=self.dicts[tgt],
output_collater=self.alter_dataset_langtok(
lang_pair_dataset=lang_pair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
).collater,
)
logger.info(
"backtranslate-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(backtranslate_datasets[lang_pair]),
)
)
self.backtranslate_datasets[lang_pair] = backtranslate_datasets[
lang_pair
]
# denoising autoencoder
noising_datasets = {}
if (
self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
continue
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt])
tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt])
noising_dataset = NoisingDataset(
tgt_dataset1,
self.dicts[tgt],
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noising_datasets[lang_pair] = self.alter_dataset_langtok(
LanguagePairDataset(
noising_dataset,
tgt_dataset1.sizes,
self.dicts[tgt],
tgt_dataset2,
tgt_dataset2.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
logger.info(
"denoising-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(noising_datasets[lang_pair]),
)
)
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split("-")
src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair]
return self.alter_dataset_langtok(
LanguagePairDataset(
src_dataset,
src_dataset.sizes,
self.dicts[src],
tgt_dataset,
tgt_dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
self.dicts[src].eos(),
src,
self.dicts[tgt].eos(),
tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict(
[
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in src_datasets.keys()
]
+ [
(_get_bt_dataset_key(lang_pair), dataset)
for lang_pair, dataset in backtranslate_datasets.items()
]
+ [
(_get_denoising_dataset_key(lang_pair), dataset)
for lang_pair, dataset in noising_datasets.items()
]
),
eval_key=None
if self.training
else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_model(self, args, from_checkpoint=False):
from fairseq import models
model = models.build_model(args, self, from_checkpoint)
if not isinstance(model, FairseqMultiModel):
raise ValueError(
"SemisupervisedTranslationTask requires a FairseqMultiModel architecture"
)
# create SequenceGenerator for each model that has backtranslation dependency on it
self.sequence_generators = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and self.training:
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
key = "{}-{}".format(tgt, src)
self.sequence_generators[key] = SequenceGenerator(
[model.models[key]],
tgt_dict=self.dicts[src],
beam_size=args.bt_beam_size,
max_len_a=args.bt_max_len_a,
max_len_b=args.bt_max_len_b,
)
decoder_lang_tok_idx = self.get_decoder_langtok(src)
def backtranslate_fn(
sample,
model=model.models[key],
bos_token=decoder_lang_tok_idx,
sequence_generator=self.sequence_generators[key],
):
return sequence_generator.generate(
[model],
sample,
bos_token=bos_token,
)
self.backtranslators[lang_pair] = backtranslate_fn
return model
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
if update_num > 0:
self.update_step(update_num)
        agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
def forward_backward(model, samples, logging_output_key, weight):
nonlocal agg_loss, agg_sample_size, agg_logging_output
if samples is None or len(samples) == 0:
return
loss, sample_size, logging_output = criterion(model, samples)
if ignore_grad:
loss *= 0
else:
loss *= weight
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[logging_output_key] += logging_output[k]
if self.lambda_parallel > 0.0:
for lang_pair in self.lang_pairs:
forward_backward(
model.models[lang_pair],
sample[lang_pair],
lang_pair,
self.lambda_parallel,
)
if self.lambda_otf_bt > 0.0:
for lang_pair in self.lang_pairs:
sample_key = _get_bt_dataset_key(lang_pair)
forward_backward(
model.models[lang_pair],
sample[sample_key],
sample_key,
self.lambda_otf_bt,
)
if self.lambda_denoising > 0.0:
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
sample_key = _get_denoising_dataset_key(lang_pair)
forward_backward(
model.models["{0}-{0}".format(tgt)],
sample[sample_key],
sample_key,
self.lambda_denoising,
)
return agg_loss, agg_sample_size, agg_logging_output
def update_step(self, num_updates):
def lambda_step_func(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [
i
for i in range(len(config) - 1)
if config[i][0] <= n_iter < config[i + 1][0]
]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
if self.lambda_parallel_steps is not None:
self.lambda_parallel = lambda_step_func(
self.lambda_parallel_steps, num_updates
)
if self.lambda_denoising_steps is not None:
self.lambda_denoising = lambda_step_func(
self.lambda_denoising_steps, num_updates
)
if self.lambda_otf_bt_steps is not None:
self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)
# File: DA-Transformer-main/fairseq/tasks/sentence_prediction.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import MISSING, II, open_dict, OmegaConf
import numpy as np
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
OffsetTokensDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
RollDataset,
SortDataset,
StripTokenDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import FairseqDataclass, FairseqTask, register_task
from fairseq.dataclass import ChoiceEnum
logger = logging.getLogger(__name__)
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
@dataclass
class SentencePredictionConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
num_classes: int = field(
default=-1,
metadata={"help": "number of classes or regression targets"},
)
init_token: Optional[int] = field(
default=None,
metadata={"help": "add token at the beginning of each batch item"},
)
separator_token: Optional[int] = field(
default=None,
metadata={"help": "add separator token between inputs"},
)
no_shuffle: bool = field(
default=False,
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed tokens_per_sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
add_prev_output_tokens: bool = field(
default=False,
metadata={
"help": "add prev_output_tokens to sample, used for encoder-decoder arch"
},
)
max_positions: int = field(
default=512,
metadata={"help": "max tokens per example"},
)
regression_target: bool = II("criterion.regression_target")
classification_head_name: str = II("criterion.classification_head_name")
seed: int = II("common.seed")
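# NOTE: illustrative sketch, not part of the original fairseq file; the data path and
# the `_demo_` name are made up. It fills in the dataclass above for a hypothetical
# binary classification setup; init_token=0 and separator_token=2 are the <s>/</s>
# ids commonly used in RoBERTa-style GLUE fine-tuning recipes.
def _demo_sentence_prediction_config():
    return SentencePredictionConfig(
        data="/path/to/RTE-bin",
        num_classes=2,
        init_token=0,
        separator_token=2,
        max_positions=512,
    )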
@register_task("sentence_prediction", dataclass=SentencePredictionConfig)
class SentencePredictionTask(FairseqTask):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
def __init__(self, cfg, data_dictionary, label_dictionary):
super().__init__(cfg)
self.dictionary = data_dictionary
self._label_dictionary = label_dictionary
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, cfg, **kwargs):
assert cfg.num_classes > 0, "Must set task.num_classes"
# load data dictionary
data_dict = cls.load_dictionary(
os.path.join(cfg.data, "input0", "dict.txt"),
)
logger.info("[input] dictionary: {} types".format(len(data_dict)))
# load label dictionary
if not cfg.regression_target:
label_dict = cls.load_dictionary(
os.path.join(cfg.data, "label", "dict.txt"),
)
logger.info("[label] dictionary: {} types".format(len(label_dict)))
else:
label_dict = data_dict
return cls(cfg, data_dict, label_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(key, split):
return os.path.join(self.cfg.data, key, split)
def make_dataset(key, dictionary):
split_path = get_path(key, split)
try:
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
combine=combine,
)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"dataset {e} not found")
dataset = None
else:
raise e
return dataset
input0 = make_dataset("input0", self.source_dictionary)
assert input0 is not None, "could not find dataset: {}".format(
get_path("input0", split)
)
input1 = make_dataset("input1", self.source_dictionary)
if self.cfg.init_token is not None:
input0 = PrependTokenDataset(input0, self.cfg.init_token)
if input1 is None:
src_tokens = input0
else:
if self.cfg.separator_token is not None:
input1 = PrependTokenDataset(input1, self.cfg.separator_token)
src_tokens = ConcatSentencesDataset(input0, input1)
with data_utils.numpy_seed(self.cfg.seed):
shuffle = np.random.permutation(len(src_tokens))
src_tokens = maybe_shorten_dataset(
src_tokens,
split,
self.cfg.shorten_data_split_list,
self.cfg.shorten_method,
self.max_positions(),
self.cfg.seed,
)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(src_tokens, reduce=False),
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
if self.cfg.add_prev_output_tokens:
prev_tokens_dataset = RightPadDataset(
RollDataset(src_tokens, 1),
pad_idx=self.dictionary.pad(),
)
dataset["net_input"].update(
prev_output_tokens=prev_tokens_dataset,
)
if not self.cfg.regression_target:
label_dataset = make_dataset("label", self.label_dictionary)
if label_dataset is not None:
dataset.update(
target=OffsetTokensDataset(
StripTokenDataset(
label_dataset,
id_to_strip=self.label_dictionary.eos(),
),
offset=-self.label_dictionary.nspecial,
)
)
else:
label_path = "{0}.label".format(get_path("label", split))
if os.path.exists(label_path):
def parse_regression_target(i, line):
values = line.split()
assert (
len(values) == self.cfg.num_classes
), f'expected num_classes={self.cfg.num_classes} regression target values on line {i}, found: "{line}"'
return [float(x) for x in values]
with open(label_path) as h:
dataset.update(
target=RawLabelDataset(
[
parse_regression_target(i, line.strip())
for i, line in enumerate(h.readlines())
]
)
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
if self.cfg.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, cfg, from_checkpoint=False):
from fairseq import models
with open_dict(cfg) if OmegaConf.is_config(cfg) else contextlib.ExitStack():
cfg.max_positions = self.cfg.max_positions
model = models.build_model(cfg, self, from_checkpoint)
model.register_classification_head(
self.cfg.classification_head_name,
num_classes=self.cfg.num_classes,
)
return model
def max_positions(self):
return self.cfg.max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@property
def label_dictionary(self):
return self._label_dictionary
| 9,201 | 31.062718 | 123 | py |
null | DA-Transformer-main/fairseq/tasks/sentence_ranking.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
data_utils,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("sentence_ranking")
class SentenceRankingTask(LegacyFairseqTask):
"""
Ranking task on multiple sentences.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", metavar="FILE", help="file prefix for data")
parser.add_argument(
"--num-classes", type=int, help="number of sentences to be ranked"
)
parser.add_argument(
"--init-token",
type=int,
help="add token at the beginning of each batch item",
)
parser.add_argument(
"--separator-token", type=int, help="add separator token between inputs"
)
parser.add_argument("--no-shuffle", action="store_true")
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--max-option-length", type=int, help="max length for each option"
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, "input0", "dict.txt"),
source=True,
)
logger.info("[input] dictionary: {} types".format(len(data_dict)))
return SentenceRankingTask(args, data_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset("input0", self.source_dictionary)
input_options = [
make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
for idx in range(self.args.num_classes)
]
if self.args.separator_token is not None:
input0 = PrependTokenDataset(input0, self.args.separator_token)
src_tokens = []
for input_option in input_options:
if self.args.init_token is not None:
input_option = PrependTokenDataset(input_option, self.args.init_token)
if self.args.max_option_length is not None:
input_option = TruncateDataset(
input_option, self.args.max_option_length
)
src_token = ConcatSentencesDataset(input_option, input0)
src_token = maybe_shorten_dataset(
src_token,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.max_positions,
self.args.seed,
)
src_tokens.append(src_token)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens[0]))
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for src_token_idx in range(len(src_tokens)):
dataset.update(
{
"net_input{idx}".format(idx=src_token_idx + 1): {
"src_tokens": RightPadDataset(
src_tokens[src_token_idx],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": NumelDataset(
src_tokens[src_token_idx], reduce=False
),
}
}
)
label_path = "{}.label".format(get_path("label", split))
if os.path.exists(label_path):
with open(label_path) as h:
dataset.update(
target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args, from_checkpoint=False):
from fairseq import models
model = models.build_model(args, self, from_checkpoint)
model.register_classification_head(
getattr(args, "ranking_head_name", "sentence_classification_head"),
num_classes=1,
)
return model
def max_positions(self):
return self.args.max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| 7,037 | 30.990909 | 86 | py |
null | DA-Transformer-main/fairseq/tasks/simultaneous_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.tasks.translation import TranslationTask, TranslationConfig
try:
import examples.simultaneous_translation # noqa
import_successful = True
except BaseException:
import_successful = False
logger = logging.getLogger(__name__)
def check_import(flag):
if not flag:
raise ImportError(
"'examples.simultaneous_translation' is not correctly imported. "
"Please considering `pip install -e $FAIRSEQ_DIR`."
)
@register_task("simul_speech_to_text")
class SimulSpeechToTextTask(SpeechToTextTask):
def __init__(self, args, tgt_dict):
check_import(import_successful)
super().__init__(args, tgt_dict)
@register_task("simul_text_to_text", dataclass=TranslationConfig)
class SimulTextToTextTask(TranslationTask):
def __init__(self, cfg, src_dict, tgt_dict):
check_import(import_successful)
super().__init__(cfg, src_dict, tgt_dict)
| 1,226 | 28.214286 | 77 | py |
null | DA-Transformer-main/fairseq/tasks/speech_to_speech.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import json
import logging
import math
from pathlib import Path
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.audio.data_cfg import S2SDataConfig, MultitaskConfig
from fairseq.data.audio.speech_to_speech_dataset import SpeechToSpeechDatasetCreator
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
logger = logging.getLogger(__name__)
class StackUnitSequenceGenerator(nn.Module):
def __init__(self, tgt_dict, vocab_size):
super().__init__()
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.unk = tgt_dict.unk()
self.offset = len(tgt_dict) - vocab_size
self.vocab_size = vocab_size
def pack_units(self, input: torch.Tensor, n_frames_per_step) -> torch.Tensor:
if n_frames_per_step <= 1:
return input
bsz, _, n = input.shape
assert n == n_frames_per_step
scale = [
pow(self.vocab_size, n_frames_per_step - 1 - i)
for i in range(n_frames_per_step)
]
scale = torch.LongTensor(scale).squeeze(0).to(input.device)
mask = input >= self.offset
res = ((input - self.offset) * scale * mask).sum(dim=2) + self.offset
return res
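    # Illustrative packing (hypothetical numbers): with vocab_size=100, offset=4 and
    # n_frames_per_step=2, a stacked frame holding dictionary indices [14, 29]
    # (i.e. units 10 and 25) is packed into the single index
    #   (10 * 100 + 25 * 1) + 4 = 1029,
    # so the stacked units are combined in base vocab_size before re-adding the offset.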
@torch.no_grad()
def generate(self, models, sample, **kwargs):
# currently only support viterbi search for stacked units
model = models[0]
model.eval()
max_len = model.max_decoder_positions()
# TODO: incorporate max_len_a and max_len_b
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len, _ = src_tokens.size()
n_frames_per_step = model.decoder.n_frames_per_step
# initialize
encoder_out = model.forward_encoder(
src_tokens, src_lengths, speaker=sample["speaker"]
)
incremental_state = {}
pred_out, attn, scores = [], [], []
finished = src_tokens.new_zeros((bsz,)).bool()
prev_output_tokens = src_lengths.new_zeros((bsz, 1)).long().fill_(self.eos)
for _ in range(max_len):
cur_out, cur_extra = model.forward_decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
)
lprobs = model.get_normalized_probs([cur_out], log_probs=True)
# never select pad, unk
lprobs[:, :, self.pad] = -math.inf
lprobs[:, :, self.unk] = -math.inf
cur_pred_lprob, cur_pred_out = torch.max(lprobs, dim=2)
scores.append(cur_pred_lprob)
pred_out.append(cur_pred_out)
prev_output_tokens = torch.cat(
(
prev_output_tokens,
self.pack_units(
cur_pred_out.view(bsz, 1, n_frames_per_step), n_frames_per_step
),
),
dim=1,
)
attn.append(cur_extra["attn"][0])
cur_finished = torch.any(cur_pred_out.squeeze(1) == self.eos, dim=1)
finished = finished | cur_finished
if finished.sum().item() == bsz:
break
pred_out = torch.cat(pred_out, dim=1).view(bsz, -1)
attn = torch.cat(attn, dim=2)
alignment = attn.max(dim=1)[1]
attn = attn.repeat_interleave(n_frames_per_step, dim=2)
alignment = alignment.repeat_interleave(n_frames_per_step, dim=1)
scores = torch.cat(scores, dim=1)
eos_idx = (pred_out == self.eos).nonzero(as_tuple=True)
out_lens = src_lengths.new_zeros((bsz,)).long().fill_(max_len)
for b, l in zip(eos_idx[0], eos_idx[1]):
out_lens[b] = min(l, out_lens[b])
hypos = [
[
{
"tokens": pred_out[b, :out_len],
"attn": attn[b, :, :out_len],
"alignment": alignment[b, :out_len],
"positional_scores": scores[b, :out_len],
"score": utils.item(scores[b, :out_len].sum().data),
}
]
for b, out_len in zip(range(bsz), out_lens)
]
return hypos
@register_task("speech_to_speech")
class SpeechToSpeechTask(LegacyFairseqTask):
@classmethod
def add_args(cls, parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
"--target-is-code",
action="store_true",
help="set if target is discrete unit instead of spectrogram",
)
parser.add_argument(
"--target-code-size", type=int, default=None, help="# discrete units"
)
parser.add_argument(
"--n-frames-per-step",
type=int,
default=1,
help="# stacked frames, use 0 for reduced discrete unit sequence",
)
parser.add_argument(
"--multitask-config-yaml",
type=str,
default=None,
help="Configuration YAML filename for the multitasks (under manifest root)",
)
parser.add_argument("--eval-inference", action="store_true")
parser.add_argument(
"--eval-args",
type=str,
default="{}",
            help='generation args for the speech-to-unit model, e.g., \'{"beam": 5, "max_len_a": 1}\', as JSON string',
)
parser.add_argument("--eos-prob-threshold", type=float, default=0.5)
parser.add_argument(
"--mcd-normalize-type",
type=str,
default="targ",
choices=["targ", "pred", "path"],
)
parser.add_argument(
"--vocoder",
type=str,
default="griffin_lim",
choices=["griffin_lim", "hifigan", "code_hifigan"],
)
parser.add_argument("--spec-bwd-max-iter", type=int, default=8)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2SDataConfig(Path(args.data) / args.config_yaml)
self.multitask_tasks = {}
if getattr(args, "multitask_config_yaml", None) is not None:
multitask_cfg = MultitaskConfig(
Path(args.data) / args.multitask_config_yaml
)
for task_name, task_config in multitask_cfg.get_all_tasks().items():
self.multitask_tasks[task_name] = DummyMultiTask(
task_config, task_config.tgt_dict
)
@classmethod
def setup_task(cls, args, **kwargs):
tgt_dict = None
if args.target_is_code:
assert args.target_code_size is not None
tgt_dict = Dictionary()
for i in range(args.target_code_size):
tgt_dict.add_symbol(str(i))
logger.info(f"dictionary size: " f"{len(tgt_dict):,}")
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
assert args.n_frames_per_step >= 1
assert (
not args.eval_inference
or (args.target_is_code and args.vocoder == "code_hifigan")
or (not args.target_is_code and args.vocoder != "code_hifigan")
)
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if len(self.multitask_tasks) > 0:
if self.args.target_is_code and args._name != "speech_to_unit":
raise ValueError(
"set --criterion speech_to_unit for speech-to-unit loss with multitask"
)
elif not self.args.target_is_code and args._name != "speech_to_spectrogram":
raise ValueError(
"set --criterion speech_to_spectrogram for speech-to-spectrogram loss with multitask"
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
self.datasets[split] = SpeechToSpeechDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
is_train_split=split.startswith("train"),
epoch=epoch,
seed=self.args.seed,
target_is_code=self.args.target_is_code,
target_dictionary=self.target_dictionary,
n_frames_per_step=self.args.n_frames_per_step,
multitask=self.multitask_tasks,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args, from_checkpoint=False):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_transformed_channels
args.target_speaker_embed = self.data_cfg.target_speaker_embed is not None
args.n_frames_per_step = self.args.n_frames_per_step
model = super().build_model(args, from_checkpoint)
if len(self.multitask_tasks) > 0:
from fairseq.models.speech_to_speech.s2s_transformer import (
S2STransformerMultitaskModelBase,
)
assert isinstance(model, S2STransformerMultitaskModelBase)
if self.args.eval_inference:
self.eval_gen_args = json.loads(self.args.eval_args)
self.generator = self.build_generator(
[model], Namespace(**self.eval_gen_args)
)
return model
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if not self.args.target_is_code or self.args.eval_inference:
from fairseq.models.text_to_speech.vocoder import get_vocoder
self.vocoder = get_vocoder(self.args, self.data_cfg)
self.vocoder = (
self.vocoder.cuda()
if torch.cuda.is_available() and not self.args.cpu
else self.vocoder.cpu()
)
if self.args.target_is_code:
if self.args.n_frames_per_step == 1:
seq_generator = super().build_generator(
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=extra_gen_cls_kwargs,
)
else:
assert (
getattr(args, "beam", 1) == 1 and getattr(args, "nbest", 1) == 1
), "only support viterbi search for stacked units"
seq_generator = StackUnitSequenceGenerator(
self.tgt_dict,
self.args.target_code_size,
)
else:
if getattr(args, "teacher_forcing", False):
from fairseq.speech_generator import (
TeacherForcingAutoRegressiveSpeechGenerator,
)
generator = TeacherForcingAutoRegressiveSpeechGenerator
logger.info("Teacher forcing mode for generation")
else:
from fairseq.speech_generator import AutoRegressiveSpeechGenerator
generator = AutoRegressiveSpeechGenerator
seq_generator = generator(
models[0],
self.vocoder,
self.data_cfg,
max_iter=self.args.max_target_positions,
eos_prob_threshold=self.args.eos_prob_threshold,
)
return seq_generator
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
for task_name, task_obj in self.multitask_tasks.items():
criterion.set_multitask_loss_weight(
task_name, task_obj.args.get_loss_weight(update_num)
)
loss, sample_size, logging_output = super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_inference:
hypos, inference_losses = self.valid_step_with_inference(
sample, model, self.generator
)
for k, v in inference_losses.items():
assert k not in logging_output
logging_output[k] = v
return loss, sample_size, logging_output
def valid_step_with_inference(self, sample, model, generator):
if self.args.target_is_code:
hypos = generator.generate([model], sample)
tgt_lens = (
sample["target_lengths"] - 1
) * self.args.n_frames_per_step # strip <eos>
for b, (f, l) in enumerate(zip(sample["target"], tgt_lens)):
hypos[b][0]["targ_waveform"] = self.vocoder(
{"code": f[:l] - 4}, # remove <bos>, <pad>, <eos>, <unk>
dur_prediction=self.eval_gen_args.get("dur_prediction", False),
)
if len(hypos[b][0]["tokens"]) > 0:
hypos[b][0]["waveform"] = self.vocoder(
{"code": hypos[b][0]["tokens"] - 4},
dur_prediction=self.eval_gen_args.get("dur_prediction", False),
)
else:
hypos[b][0]["waveform"] = torch.flip(
hypos[b][0]["targ_waveform"], dims=[0]
)
else:
hypos = [
[hypo] for hypo in generator.generate(model, sample, has_targ=True)
]
losses = {
"mcd_loss": 0.0,
"targ_frames": 0.0,
"pred_frames": 0.0,
"path_frames": 0.0,
"nins": 0.0,
"ndel": 0.0,
}
rets = batch_mel_cepstral_distortion(
[hypo[0]["targ_waveform"] for hypo in hypos],
[hypo[0]["waveform"] for hypo in hypos],
self.data_cfg.output_sample_rate,
normalize_type=None,
)
for d, extra in rets:
pathmap = extra[-1]
losses["mcd_loss"] += d.item()
losses["targ_frames"] += pathmap.size(0)
losses["pred_frames"] += pathmap.size(1)
losses["path_frames"] += pathmap.sum().item()
losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item()
losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item()
losses["norm_frames"] = losses[
f"{getattr(self.args, 'mcd_normalize_type', 'targ')}_frames"
]
return hypos, losses
class DummyMultiTask(LegacyFairseqTask):
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
@property
def target_dictionary(self):
return self.tgt_dict
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
if self.args.decoder_type == "ctc":
model = models[0] # only support single model
encoder_out = model(**sample)
if hasattr(model, "get_logits"):
emissions = model.get_logits(
encoder_out
) # no need to normalize emissions
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return generator.decode(
emissions.transpose(0, 1).float().cpu().contiguous()
)
else:
raise NotImplementedError("only ctc decoder is supported at the moment")
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if self.args.decoder_type == "ctc":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, self.tgt_dict)
else:
raise NotImplementedError("only ctc decoder is supported at the moment")
| 17,318 | 35.615222 | 116 | py |
null | DA-Transformer-main/fairseq/tasks/speech_to_text.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
get_features_or_waveform,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(LegacyFairseqTask):
@classmethod
def add_args(cls, parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml)
self.speaker_to_id = self._get_speaker_to_id()
def _get_speaker_to_id(self):
speaker_to_id = None
speaker_set_filename = self.data_cfg.config.get("speaker_set_filename")
if speaker_set_filename is not None:
speaker_set_path = Path(self.args.data) / speaker_set_filename
with open(speaker_set_path) as f:
speaker_to_id = {r.strip(): i for i, r in enumerate(f)}
return speaker_to_id
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml)
dict_path = Path(args.data) / data_cfg.vocab_filename
if not dict_path.is_file():
raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}")
tgt_dict = Dictionary.load(dict_path.as_posix())
logger.info(
f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
"target language ID token is prepended as BOS."
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
speaker_to_id=self.speaker_to_id,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args, from_checkpoint=False):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
args.speaker_to_id = self.speaker_to_id
return super(SpeechToTextTask, self).build_model(args, from_checkpoint)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
"target language ID token is prepended as BOS."
)
lang_token_ids = {
i
for s, i in self.tgt_dict.indices.items()
if SpeechToTextDataset.is_lang_tag(s)
}
if extra_gen_cls_kwargs is None:
extra_gen_cls_kwargs = {}
extra_gen_cls_kwargs["symbols_to_strip_from_output"] = lang_token_ids
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
return lines, n_frames
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
return SpeechToTextDataset(
"interactive", False, self.data_cfg, src_tokens, src_lengths
)
| 5,866 | 34.557576 | 85 | py |
null | DA-Transformer-main/fairseq/tasks/speech_ulm_task.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import sys
import torch
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from fairseq.data import Dictionary
from fairseq.data.codedataset import ExpressiveCodeDataConfig, CodeDataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING, DictConfig
logger = logging.getLogger(__name__)
class UnitDictionary(Dictionary):
"""
    A fixed-size Dictionary that operates on integer-valued tokens
    with a trivial (identity) token <-> id mapping.
Special symbols (bos, eos, ...) have ids above n_units.
"""
def __init__(
self,
*, # begin keyword-only arguments
n_units,
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
clip=False,
):
self.n_units = n_units
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.clip = clip
self.symbols = []
self.count = []
self.indices = {}
for i in range(n_units):
self.add_symbol(str(i))
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def encode_line(self, line, append_eos=True, prepend_bos=False) -> torch.IntTensor:
words = [int(x) for x in line.split()]
if self.clip:
words = [min(self.n_units - 1, word) for word in words]
if prepend_bos:
words = [self.bos_index] + words
if append_eos:
words.append(self.eos_index)
ids = torch.IntTensor(words)
return ids
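# Illustrative usage (a sketch, not executed here): with n_units=100 the first 100
# symbols are the unit ids themselves and the special symbols are appended after them, so
#   d = UnitDictionary(n_units=100)
#   d.encode_line("12 7 3")                    # -> IntTensor([12, 7, 3, 102]); 102 == d.eos_index
#   d.encode_line("12 7 3", prepend_bos=True)  # additionally prefixes 100 == d.bos_index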
@dataclass
class SpeechUnitModelingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "Path to data config.json"})
max_token_duration: int = field(
default=20, metadata={"help": "all token durations are capped to this value"}
)
tokens_per_sample: int = field(
default=1024, metadata={"help": "tokens in a sample"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max target positions"}
)
# duration modeling
ignore_duration_input: bool = field(
default=False, metadata={"help": "whether token durations should be zeroed out"}
)
discrete_duration: bool = field(
default=False, metadata={"help": "treat duration as discrete variable"}
)
# F0 modeling
ignore_f0_input: bool = field(
default=False, metadata={"help": "whether F0 should be zeroed out"}
)
discrete_f0: bool = field(
default=False, metadata={"help": "load quantized f0. get bin from config"}
)
log_f0: bool = field(
default=False, metadata={"help": "whether f0 should be modeled in log space"}
)
normalize_f0_mean: bool = field(
default=False, metadata={"help": "whether normalize f0 by speaker mean"}
)
normalize_f0_std: bool = field(
default=False, metadata={"help": "whether normalize f0 by speaker stddev"}
)
interpolate_f0: bool = field(
default=False,
metadata={"help": "whether interpolate f0 for non-voiced segments"},
)
# input/output streams
stream_shifts: str = field(
default="0,0",
metadata={
"help": (
"comma-separated integer list denoting right-shift for "
"duration and pitch streams"
)
},
)
@register_task("speech_unit_modeling", dataclass=SpeechUnitModelingConfig)
class SpeechUnitLanguageModelingTask(FairseqTask):
def __init__(self, cfg: SpeechUnitModelingConfig) -> None:
super().__init__(cfg)
assert not self.cfg.normalize_f0_std or self.cfg.normalize_f0_mean
self.data_config = ExpressiveCodeDataConfig(cfg.data)
self._source_dictionary = self._target_dictionary = UnitDictionary(
n_units=self.data_config.n_units
)
self._source_duration_dictionary = self._target_duration_dictionary = (
UnitDictionary(n_units=self.cfg.max_token_duration + 1, clip=True)
if self.cfg.discrete_duration
else None
)
self._source_f0_dictionary = self._target_f0_dictionary = (
UnitDictionary(n_units=self.data_config.f0_vq_n_units)
if self.cfg.discrete_f0
else None
)
self._channel_names = ["token", "duration", "f0"]
self._channel_sizes = [
len(self.target_dictionary),
len(self.target_duration_dictionary) if self.cfg.discrete_duration else 1,
len(self.target_f0_dictionary) if self.cfg.discrete_f0 else 1,
]
@property
def source_dictionary(self) -> Optional[Dictionary]:
return self._source_dictionary
@property
def source_duration_dictionary(self) -> Optional[Dictionary]:
return self._source_duration_dictionary
@property
def source_f0_dictionary(self) -> Optional[Dictionary]:
return self._source_f0_dictionary
@property
def channel_names(self) -> List[str]:
return self._channel_names
@property
def channel_sizes(self) -> List[int]:
return self._channel_sizes
@property
def dictionary(self) -> Optional[Dictionary]:
return self._source_dictionary
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self._target_dictionary
@property
def target_duration_dictionary(self) -> Optional[Dictionary]:
return self._target_duration_dictionary
@property
def target_f0_dictionary(self) -> Optional[Dictionary]:
return self._target_f0_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return [self._dictionaries[l] for l in self.cfg.labels]
@classmethod
def setup_task(
cls, cfg: SpeechUnitModelingConfig, **kwargs
) -> "SpeechUnitLanguageModelingTask":
return cls(cfg)
def load_dataset(self, split: str, **kwargs) -> None:
self.datasets[split] = CodeDataset(
manifest=self.data_config.manifests[split],
dictionary=self.source_dictionary,
dur_dictionary=self.source_duration_dictionary,
f0_dictionary=self.source_f0_dictionary,
config=self.data_config,
discrete_dur=self.cfg.discrete_duration,
discrete_f0=self.cfg.discrete_f0,
log_f0=self.cfg.log_f0,
normalize_f0_mean=self.cfg.normalize_f0_mean,
normalize_f0_std=self.cfg.normalize_f0_std,
interpolate_f0=self.cfg.interpolate_f0,
shifts=self.cfg.stream_shifts,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def build_criterion(self, cfg: DictConfig):
import fairseq.criterions
return fairseq.criterions.build_criterion(cfg, self)
| 7,533 | 32.484444 | 88 | py |
null | DA-Transformer-main/fairseq/tasks/text_to_speech.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import os.path as op
import torch
import torch.nn.functional as F
import numpy as np
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDatasetCreator
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.speech_generator import (
AutoRegressiveSpeechGenerator,
NonAutoregressiveSpeechGenerator,
TeacherForcingAutoRegressiveSpeechGenerator,
)
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
try:
from tensorboardX import SummaryWriter
except ImportError:
# logger.info("Please install tensorboardX: pip install tensorboardX")
SummaryWriter = None
@register_task("text_to_speech")
class TextToSpeechTask(SpeechToTextTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1200,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument("--n-frames-per-step", type=int, default=1)
parser.add_argument("--eos-prob-threshold", type=float, default=0.5)
parser.add_argument("--eval-inference", action="store_true")
parser.add_argument("--eval-tb-nsample", type=int, default=8)
parser.add_argument("--vocoder", type=str, default="griffin_lim")
parser.add_argument("--spec-bwd-max-iter", type=int, default=8)
def __init__(self, args, src_dict):
super().__init__(args, src_dict)
self.src_dict = src_dict
self.sr = self.data_cfg.config.get("features").get("sample_rate")
self.tensorboard_writer = None
self.tensorboard_dir = ""
if args.tensorboard_logdir and SummaryWriter is not None:
self.tensorboard_dir = os.path.join(args.tensorboard_logdir, "valid_extra")
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = TextToSpeechDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.src_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
n_frames_per_step=self.args.n_frames_per_step,
speaker_to_id=self.speaker_to_id,
)
@property
def target_dictionary(self):
return None
@property
def source_dictionary(self):
return self.src_dict
def get_speaker_embeddings_path(self):
speaker_emb_path = None
if self.data_cfg.config.get("speaker_emb_filename") is not None:
speaker_emb_path = op.join(
self.args.data, self.data_cfg.config.get("speaker_emb_filename")
)
return speaker_emb_path
@classmethod
def get_speaker_embeddings(cls, args):
embed_speaker = None
if args.speaker_to_id is not None:
if args.speaker_emb_path is None:
embed_speaker = torch.nn.Embedding(
len(args.speaker_to_id), args.speaker_embed_dim
)
else:
speaker_emb_mat = np.load(args.speaker_emb_path)
assert speaker_emb_mat.shape[1] == args.speaker_embed_dim
embed_speaker = torch.nn.Embedding.from_pretrained(
torch.from_numpy(speaker_emb_mat),
freeze=True,
)
logger.info(
f"load speaker embeddings from {args.speaker_emb_path}. "
f"train embedding? {embed_speaker.weight.requires_grad}\n"
f"embeddings:\n{speaker_emb_mat}"
)
return embed_speaker
def build_model(self, cfg, from_checkpoint=False):
cfg.pitch_min = self.data_cfg.config["features"].get("pitch_min", None)
cfg.pitch_max = self.data_cfg.config["features"].get("pitch_max", None)
cfg.energy_min = self.data_cfg.config["features"].get("energy_min", None)
cfg.energy_max = self.data_cfg.config["features"].get("energy_max", None)
cfg.speaker_emb_path = self.get_speaker_embeddings_path()
model = super().build_model(cfg, from_checkpoint)
self.generator = None
if getattr(cfg, "eval_inference", False):
self.generator = self.build_generator([model], cfg)
return model
def build_generator(self, models, cfg, vocoder=None, **unused):
if vocoder is None:
vocoder = self.build_default_vocoder()
model = models[0]
if getattr(model, "NON_AUTOREGRESSIVE", False):
return NonAutoregressiveSpeechGenerator(model, vocoder, self.data_cfg)
else:
generator = AutoRegressiveSpeechGenerator
if getattr(cfg, "teacher_forcing", False):
generator = TeacherForcingAutoRegressiveSpeechGenerator
logger.info("Teacher forcing mode for generation")
return generator(
model,
vocoder,
self.data_cfg,
max_iter=self.args.max_target_positions,
eos_prob_threshold=self.args.eos_prob_threshold,
)
def build_default_vocoder(self):
from fairseq.models.text_to_speech.vocoder import get_vocoder
vocoder = get_vocoder(self.args, self.data_cfg)
if torch.cuda.is_available() and not self.args.cpu:
vocoder = vocoder.cuda()
else:
vocoder = vocoder.cpu()
return vocoder
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if getattr(self.args, "eval_inference", False):
hypos, inference_losses = self.valid_step_with_inference(
sample, model, self.generator
)
for k, v in inference_losses.items():
assert k not in logging_output
logging_output[k] = v
picked_id = 0
if self.tensorboard_dir and (sample["id"] == picked_id).any():
self.log_tensorboard(
sample,
hypos[: self.args.eval_tb_nsample],
model._num_updates,
is_na_model=getattr(model, "NON_AUTOREGRESSIVE", False),
)
return loss, sample_size, logging_output
def valid_step_with_inference(self, sample, model, generator):
hypos = generator.generate(model, sample, has_targ=True)
losses = {
"mcd_loss": 0.0,
"targ_frames": 0.0,
"pred_frames": 0.0,
"nins": 0.0,
"ndel": 0.0,
}
rets = batch_mel_cepstral_distortion(
[hypo["targ_waveform"] for hypo in hypos],
[hypo["waveform"] for hypo in hypos],
self.sr,
normalize_type=None,
)
for d, extra in rets:
pathmap = extra[-1]
losses["mcd_loss"] += d.item()
losses["targ_frames"] += pathmap.size(0)
losses["pred_frames"] += pathmap.size(1)
losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item()
losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item()
return hypos, losses
def log_tensorboard(self, sample, hypos, num_updates, is_na_model=False):
if self.tensorboard_writer is None:
self.tensorboard_writer = SummaryWriter(self.tensorboard_dir)
tb_writer = self.tensorboard_writer
for b in range(len(hypos)):
idx = sample["id"][b]
text = sample["src_texts"][b]
targ = hypos[b]["targ_feature"]
pred = hypos[b]["feature"]
attn = hypos[b]["attn"]
if is_na_model:
data = plot_tts_output(
[targ.transpose(0, 1), pred.transpose(0, 1)],
[f"target (idx={idx})", "output"],
attn,
"alignment",
ret_np=True,
suptitle=text,
)
else:
eos_prob = hypos[b]["eos_prob"]
data = plot_tts_output(
[targ.transpose(0, 1), pred.transpose(0, 1), attn],
[f"target (idx={idx})", "output", "alignment"],
eos_prob,
"eos prob",
ret_np=True,
suptitle=text,
)
tb_writer.add_image(
f"inference_sample_{b}", data, num_updates, dataformats="HWC"
)
if hypos[b]["waveform"] is not None:
targ_wave = hypos[b]["targ_waveform"].detach().cpu().float()
pred_wave = hypos[b]["waveform"].detach().cpu().float()
tb_writer.add_audio(
f"inference_targ_{b}", targ_wave, num_updates, sample_rate=self.sr
)
tb_writer.add_audio(
f"inference_pred_{b}", pred_wave, num_updates, sample_rate=self.sr
)
def save_figure_to_numpy(fig):
    # np.frombuffer replaces the deprecated binary-mode np.fromstring call
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
DEFAULT_V_MIN = np.log(1e-5)
def plot_tts_output(
data_2d,
title_2d,
data_1d,
title_1d,
figsize=(24, 4),
v_min=DEFAULT_V_MIN,
v_max=3,
ret_np=False,
suptitle="",
):
try:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
raise ImportError("Please install Matplotlib: pip install matplotlib")
data_2d = [
x.detach().cpu().float().numpy() if isinstance(x, torch.Tensor) else x
for x in data_2d
]
fig, axes = plt.subplots(1, len(data_2d) + 1, figsize=figsize)
if suptitle:
fig.suptitle(suptitle[:400]) # capped at 400 chars
axes = [axes] if len(data_2d) == 0 else axes
for ax, x, name in zip(axes, data_2d, title_2d):
ax.set_title(name)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
im = ax.imshow(
x,
origin="lower",
aspect="auto",
vmin=max(x.min(), v_min),
vmax=min(x.max(), v_max),
)
fig.colorbar(im, cax=cax, orientation="vertical")
if isinstance(data_1d, torch.Tensor):
data_1d = data_1d.detach().cpu().numpy()
axes[-1].plot(data_1d)
axes[-1].set_title(title_1d)
plt.tight_layout()
if ret_np:
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close(fig)
return data
def antidiag_indices(offset, min_i=0, max_i=None, min_j=0, max_j=None):
"""
for a (3, 4) matrix with min_i=1, max_i=3, min_j=1, max_j=4, outputs
offset=2 (1, 1),
offset=3 (2, 1), (1, 2)
offset=4 (2, 2), (1, 3)
offset=5 (2, 3)
constraints:
i + j = offset
min_j <= j < max_j
min_i <= offset - j < max_i
"""
if max_i is None:
max_i = offset + 1
if max_j is None:
max_j = offset + 1
min_j = max(min_j, offset - max_i + 1, 0)
max_j = min(max_j, offset - min_i + 1, offset + 1)
j = torch.arange(min_j, max_j)
i = offset - j
return torch.stack([i, j])
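# For example (matching the docstring above):
#   antidiag_indices(3, min_i=1, max_i=3, min_j=1, max_j=4)
#   # -> tensor([[2, 1],
#   #            [1, 2]])  i.e. cells (2, 1) and (1, 2) on the anti-diagonal i + j = 3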
def batch_dynamic_time_warping(distance, shapes=None):
"""full batched DTW without any constraints
distance: (batchsize, max_M, max_N) matrix
shapes: (batchsize,) vector specifying (M, N) for each entry
"""
# ptr: 0=left, 1=up-left, 2=up
ptr2dij = {0: (0, -1), 1: (-1, -1), 2: (-1, 0)}
bsz, m, n = distance.size()
cumdist = torch.zeros_like(distance)
backptr = torch.zeros_like(distance).type(torch.int32) - 1
# initialize
cumdist[:, 0, :] = distance[:, 0, :].cumsum(dim=-1)
cumdist[:, :, 0] = distance[:, :, 0].cumsum(dim=-1)
backptr[:, 0, :] = 0
backptr[:, :, 0] = 2
# DP with optimized anti-diagonal parallelization, O(M+N) steps
for offset in range(2, m + n - 1):
ind = antidiag_indices(offset, 1, m, 1, n)
c = torch.stack(
[
cumdist[:, ind[0], ind[1] - 1],
cumdist[:, ind[0] - 1, ind[1] - 1],
cumdist[:, ind[0] - 1, ind[1]],
],
dim=2,
)
v, b = c.min(axis=-1)
backptr[:, ind[0], ind[1]] = b.int()
cumdist[:, ind[0], ind[1]] = v + distance[:, ind[0], ind[1]]
# backtrace
pathmap = torch.zeros_like(backptr)
for b in range(bsz):
i = m - 1 if shapes is None else (shapes[b][0] - 1).item()
j = n - 1 if shapes is None else (shapes[b][1] - 1).item()
dtwpath = [(i, j)]
while (i != 0 or j != 0) and len(dtwpath) < 10000:
assert i >= 0 and j >= 0
di, dj = ptr2dij[backptr[b, i, j].item()]
i, j = i + di, j + dj
dtwpath.append((i, j))
dtwpath = dtwpath[::-1]
indices = torch.from_numpy(np.array(dtwpath))
pathmap[b, indices[:, 0], indices[:, 1]] = 1
return cumdist, backptr, pathmap
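# Minimal illustration (hypothetical 1x2x2 input): for
#   distance = torch.tensor([[[0., 1.], [1., 0.]]])
# the optimal alignment follows the diagonal, so batch_dynamic_time_warping(distance)
# returns pathmap [[1, 0], [0, 1]] with a cumulative distance of 0 at cumdist[:, -1, -1].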
def compute_l2_dist(x1, x2):
"""compute an (m, n) L2 distance matrix from (m, d) and (n, d) matrices"""
return torch.cdist(x1.unsqueeze(0), x2.unsqueeze(0), p=2).squeeze(0).pow(2)
def compute_rms_dist(x1, x2):
l2_dist = compute_l2_dist(x1, x2)
return (l2_dist / x1.size(1)).pow(0.5)
def get_divisor(pathmap, normalize_type):
if normalize_type is None:
return 1
elif normalize_type == "len1":
return pathmap.size(0)
elif normalize_type == "len2":
return pathmap.size(1)
elif normalize_type == "path":
return pathmap.sum().item()
else:
raise ValueError(f"normalize_type {normalize_type} not supported")
def batch_compute_distortion(y1, y2, sr, feat_fn, dist_fn, normalize_type):
d, s, x1, x2 = [], [], [], []
for cur_y1, cur_y2 in zip(y1, y2):
assert cur_y1.ndim == 1 and cur_y2.ndim == 1
cur_x1 = feat_fn(cur_y1)
cur_x2 = feat_fn(cur_y2)
x1.append(cur_x1)
x2.append(cur_x2)
cur_d = dist_fn(cur_x1, cur_x2)
d.append(cur_d)
s.append(d[-1].size())
max_m = max(ss[0] for ss in s)
max_n = max(ss[1] for ss in s)
d = torch.stack(
[F.pad(dd, (0, max_n - dd.size(1), 0, max_m - dd.size(0))) for dd in d]
)
s = torch.LongTensor(s).to(d.device)
cumdists, backptrs, pathmaps = batch_dynamic_time_warping(d, s)
rets = []
itr = zip(s, x1, x2, d, cumdists, backptrs, pathmaps)
for (m, n), cur_x1, cur_x2, dist, cumdist, backptr, pathmap in itr:
cumdist = cumdist[:m, :n]
backptr = backptr[:m, :n]
pathmap = pathmap[:m, :n]
divisor = get_divisor(pathmap, normalize_type)
distortion = cumdist[-1, -1] / divisor
ret = distortion, (cur_x1, cur_x2, dist, cumdist, backptr, pathmap)
rets.append(ret)
return rets
def batch_mel_cepstral_distortion(y1, y2, sr, normalize_type="path", mfcc_fn=None):
"""
https://arxiv.org/pdf/2011.03568.pdf
The root mean squared error computed on 13-dimensional MFCC using DTW for
alignment. MFCC features are computed from an 80-channel log-mel
spectrogram using a 50ms Hann window and hop of 12.5ms.
y1: list of waveforms
y2: list of waveforms
sr: sampling rate
"""
try:
import torchaudio
except ImportError:
raise ImportError("Please install torchaudio: pip install torchaudio")
if mfcc_fn is None or mfcc_fn.sample_rate != sr:
melkwargs = {
"n_fft": int(0.05 * sr),
"win_length": int(0.05 * sr),
"hop_length": int(0.0125 * sr),
"f_min": 20,
"n_mels": 80,
"window_fn": torch.hann_window,
}
mfcc_fn = torchaudio.transforms.MFCC(
sr, n_mfcc=13, log_mels=True, melkwargs=melkwargs
).to(y1[0].device)
return batch_compute_distortion(
y1,
y2,
sr,
lambda y: mfcc_fn(y).transpose(-1, -2),
compute_rms_dist,
normalize_type,
)
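# Example usage (a sketch; targ_wav/pred_wav are hypothetical 1-D waveform tensors):
#   rets = batch_mel_cepstral_distortion([targ_wav], [pred_wav], sr=22050)
#   mcd, (mfcc1, mfcc2, dist, cumdist, backptr, pathmap) = rets[0]
#   # mcd is the DTW-aligned RMS MFCC distance, normalized by the path length by default.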
| 17,258 | 33.380478 | 88 | py |
null | DA-Transformer-main/fairseq/tasks/translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
prepend_bos_src=None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
elif prepend_bos_src is not None:
logger.info(f"prepending src bos: {prepend_bos_src}")
src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
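# Illustrative invocation (hypothetical paths and dictionaries; mirrors how
# TranslationTask.load_dataset below calls this helper):
#   dataset = load_langpair_dataset(
#       "data-bin/iwslt14.de-en", "train", "de", src_dict, "en", tgt_dict,
#       combine=False, dataset_impl=None, upsample_primary=-1,
#       left_pad_source=True, left_pad_target=False,
#       max_source_positions=1024, max_target_positions=1024,
#   )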
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=True, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
default=-1, metadata={"help": "the amount of upsample primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_order: int = field(
default=4, metadata={"help": "bleu order"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (TranslationConfig): configuration for this translation task
"""
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg, from_checkpoint=False):
model = super().build_model(cfg, from_checkpoint)
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        if self.cfg.eval_bleu:
            EVAL_BLEU_ORDER = self.cfg.eval_bleu_order
            import sacrebleu
            # Patch sacrebleu once so its BLEU statistics use the configured n-gram
            # order instead of the library default.
            if sacrebleu.BLEU.NGRAM_ORDER != self.cfg.eval_bleu_order:
                sacrebleu.BLEU.NGRAM_ORDER = self.cfg.eval_bleu_order
                func = sacrebleu.BLEU.extract_ngrams
                sacrebleu.BLEU.extract_ngrams = lambda x: func(
                    x, min_order=1, max_order=self.cfg.eval_bleu_order
                )
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
            for i in range(self.cfg.eval_bleu_order):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
try:
from sacrebleu.metrics import BLEU
comp_bleu = BLEU.compute_bleu
except ImportError:
# compatibility API for sacrebleu 1.x
import sacrebleu
comp_bleu = sacrebleu.compute_bleu
fn_sig = inspect.getfullargspec(comp_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
if sum(meters["_bleu_totals"].sum) > 0:
bleu = comp_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth,
)
return round(bleu.score, 2)
else:
return 0
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
# File: DA-Transformer-main/fairseq/tasks/translation_from_pretrained_bart.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from . import register_task
from .translation import TranslationTask, load_langpair_dataset
@register_task("translation_from_pretrained_bart")
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
Translate from source language to target language with a model initialized with a multilingual pretrain.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--langs', type=str, metavar='LANG',
                            help='comma-separated list of monolingual languages, '
                                 'for example, "en,de,fr". These should match the '
                                 'langs from pretraining (and be in the same order). '
                                 'You should always add all pretraining language indices '
                                 'during fine-tuning.')
parser.add_argument('--prepend-bos', action='store_true',
help='prepend bos token to each sentence, which matches '
'mBART pretraining')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.langs = args.langs.split(",")
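        # Extend both dictionaries with one "[lang]" token per pretraining language plus the
        # "<mask>" symbol, so that indices stay aligned with the mBART pretraining vocabulary.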
for d in [src_dict, tgt_dict]:
for l in self.langs:
d.add_symbol("[{}]".format(l))
d.add_symbol("<mask>")
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, "max_source_positions", 1024),
max_target_positions=getattr(self.args, "max_target_positions", 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, "prepend_bos", False),
append_source_id=True,
)
def build_generator(self, models, args, **unused):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
src_lang_id = self.source_dictionary.index("[{}]".format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(
source_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
return dataset
# File: DA-Transformer-main/fairseq/tasks/translation_from_pretrained_xlm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from . import register_task
@dataclass
class TranslationFromPretrainedXLMConfig(TranslationConfig):
pass
@register_task(
"translation_from_pretrained_xlm", dataclass=TranslationFromPretrainedXLMConfig
)
class TranslationFromPretrainedXLMTask(TranslationTask):
"""
Same as TranslationTask except use the MaskedLMDictionary class so that
we can load data that was binarized with the MaskedLMDictionary class.
This task should be used for the entire training pipeline when we want to
train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
training NMT with the pretrained XLM checkpoint, and subsequent evaluation
of that trained model.
"""
@classmethod
def load_dictionary(cls, filename):
"""Load the masked LM dictionary from the filename
Args:
filename (str): the filename
"""
return MaskedLMDictionary.load(filename)
# File: DA-Transformer-main/fairseq/tasks/translation_lev.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import (
TranslationConfig,
TranslationTask,
load_langpair_dataset,
)
from fairseq.utils import new_arange
NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask"])
@dataclass
class TranslationLevenshteinConfig(TranslationConfig):
noise: NOISE_CHOICES = field(
default="random_delete",
metadata={"help": "type of noise"},
)
@register_task("translation_lev", dataclass=TranslationLevenshteinConfig)
class TranslationLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
cfg: TranslationLevenshteinConfig
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
prepend_bos=True,
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
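            # Randomly drops a subset of ordinary target tokens (never <bos>, <eos> or
            # <pad>) and re-compacts the sequence, producing the partially deleted
            # targets from which the model learns to insert tokens.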
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True
)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = (
2
+ (
(target_length - 2)
* target_score.new_zeros(target_score.size(0), 1).uniform_()
).long()
)
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = (
target_tokens.gather(1, target_rank)
.masked_fill_(target_cutoff, pad)
.gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
)
prev_target_tokens = prev_target_tokens[
:, : prev_target_tokens.ne(pad).sum(1).max()
]
return prev_target_tokens
def _random_mask(target_tokens):
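            # Replaces a uniformly sampled number of ordinary target tokens with <unk>
            # (used here as the mask symbol); special tokens are never masked and at
            # least one token is always masked.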
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = (
target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk
)
return prev_target_tokens
def _full_mask(target_tokens):
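            # Replaces every ordinary target token with <unk>, keeping only <bos>,
            # <eos> and <pad>, so the model has to predict the whole sequence.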
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = (
target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
)
return target_tokens.masked_fill(~target_mask, unk)
if self.cfg.noise == "random_delete":
return _random_delete(target_tokens)
elif self.cfg.noise == "random_mask":
return _random_mask(target_tokens)
elif self.cfg.noise == "full_mask":
return _full_mask(target_tokens)
elif self.cfg.noise == "no_noise":
return target_tokens
else:
raise NotImplementedError
def build_generator(self, models, args, **unused):
# add models input to match the API for SequenceGenerator
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
return IterativeRefinementGenerator(
self.target_dictionary,
eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
max_iter=getattr(args, "iter_decode_max_iter", 10),
beam_size=getattr(args, "iter_decode_with_beam", 1),
reranking=getattr(args, "iter_decode_with_external_reranker", False),
decoding_format=getattr(args, "decoding_format", None),
adaptive=not getattr(args, "iter_decode_force_max_iter", False),
retain_history=getattr(args, "retain_iter_history", False),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError(
"Constrained decoding with the translation_lev task is not supported"
)
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
# File: DA-Transformer-main/fairseq/tasks/translation_multi_simple_epoch.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
import time
import torch
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
ListDataset,
data_utils,
iterators,
)
from fairseq.data.multilingual.multilingual_data_manager import (
MultilingualDatasetManager,
)
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.utils import FileContentsAction
###
def get_time_gap(s, e):
return (
datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
).__str__()
###
logger = logging.getLogger(__name__)
@register_task("translation_multi_simple_epoch")
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr',
action=FileContentsAction)
parser.add_argument('--keep-inference-langtok', action='store_true',
help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
# fmt: on
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
# lang_pairs. However for other multitask settings or when we want to
# optimize for certain languages we want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
# models.build_model(). This allows multitask type of sub-class can
# build models other than the input lang_pairs
self.model_lang_pairs = self.lang_pairs
self.source_langs = [d.split("-")[0] for d in self.lang_pairs]
self.target_langs = [d.split("-")[1] for d in self.lang_pairs]
self.check_dicts(self.dicts, self.source_langs, self.target_langs)
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(
args, self.lang_pairs, langs, dicts, self.sampling_method
)
def check_dicts(self, dicts, source_langs, target_langs):
if self.args.source_dict is not None or self.args.target_dict is not None:
# no need to check whether the source side and target side are sharing dictionaries
return
src_dict = dicts[source_langs[0]]
tgt_dict = dicts[target_langs[0]]
for src_lang in source_langs:
assert (
src_dict == dicts[src_lang]
            ), (
                "Different dictionaries are specified for different source languages; "
                "TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages"
            )
for tgt_lang in target_langs:
assert (
tgt_dict == dicts[tgt_lang]
            ), (
                "Different dictionaries are specified for different target languages; "
                "TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages"
            )
@classmethod
def setup_task(cls, args, **kwargs):
langs, dicts, training = MultilingualDatasetManager.prepare(
cls.load_dictionary, args, **kwargs
)
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split in self.datasets:
dataset = self.datasets[split]
if self.has_sharded_data(split):
if self.args.virtual_epoch_size is not None:
if dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
# no need to load next shard so skip loading
                        # also this avoids always loading from the beginning of the data
return
else:
shard_epoch = epoch
else:
# estimate the shard epoch from virtual data size and virtual epoch size
shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch)
logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}")
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
if split in self.datasets:
del self.datasets[split]
logger.info("old dataset deleted manually")
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
self.datasets[split] = self.data_manager.load_dataset(
split,
self.training,
epoch=epoch,
combine=combine,
shard_epoch=shard_epoch,
**kwargs,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the multilingual_translation task is not supported"
)
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"]
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(
dataset,
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(
self.args.source_lang,
self.args.target_lang,
dataset=dataset.src,
spec=src_langtok_spec,
)
return dataset
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if not getattr(args, "keep_inference_langtok", False):
_, tgt_langtok_spec = self.args.langtoks["main"]
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_model(self, args, from_checkpoint=False):
return super().build_model(args, from_checkpoint)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
_, tgt_langtok_spec = self.args.langtoks["main"]
if not self.args.lang_tok_replacing_bos_eos:
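                # When a target-language token is configured, force decoding to start with
                # it by passing it as a one-token prefix for every sentence in the batch.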
if prefix_tokens is None and tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
prefix_tokens = (
torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
)
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
else:
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
if tgt_langtok_spec
else self.target_dictionary.eos(),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
return self.data_manager.get_source_dictionary(self.source_langs[0])
@property
def target_dictionary(self):
return self.data_manager.get_target_dictionary(self.target_langs[0])
def create_batch_sampler_func(
self,
max_positions,
ignore_invalid_inputs,
max_tokens,
max_sentences,
required_batch_size_multiple=1,
seed=1,
):
def construct_batch_sampler(dataset, epoch):
splits = [
s for s, _ in self.datasets.items() if self.datasets[s] == dataset
]
split = splits[0] if len(splits) > 0 else None
# NEW implementation
if epoch is not None:
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
start_time = time.time()
logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}")
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
logger.info(
f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
# filter examples that are too large
if max_positions is not None:
my_time = time.time()
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
logger.info(
f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
# create mini-batches with given size constraints
my_time = time.time()
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
logger.info(
f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}"
)
logger.info(
f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
return batch_sampler
return construct_batch_sampler
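    # Note: construct_batch_sampler is returned as a callable rather than a fixed list of
    # batches, so the epoch iterator below can rebuild and reshuffle batches every epoch.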
# we need to override get_batch_iterator because we want to reset the epoch iterator each time
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
                (default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
            grouped_shuffling (bool, optional): group batches with each group
                containing num_shards batches and shuffle groups. Reduces difference
                between sequence lengths among workers for batches sorted by length.
            update_epoch_batch_itr (bool, optional): if true then do not use the cached
                batch iterator for the epoch
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# initialize the dataset with the correct starting epoch
assert isinstance(dataset, FairseqDataset)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
if self.args.sampling_method == "RoundRobin":
batch_iter = super().get_batch_iterator(
dataset,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
data_buffer_size=data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=skip_remainder_batch,
update_epoch_batch_itr=update_epoch_batch_itr,
)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(
max_positions,
ignore_invalid_inputs,
max_tokens,
max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
)
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=construct_batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
# File: DA-Transformer-main/fairseq_cli/__init__.py (empty file)
# File: DA-Transformer-main/fairseq_cli/datpreprocess.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
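Example invocation (illustrative; paths and language pair are placeholders):
    python fairseq_cli/datpreprocess.py --task translation_dat_task \
        --source-lang de --target-lang en \
        --trainpref data/train --validpref data/valid --testpref data/test \
        --destdir data-bin/example --workers 8 --joined-dictionary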
"""
import logging
import os
import shutil
import sys
import typing as tp
from argparse import Namespace
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.binarizer import (
AlignmentDatasetBinarizer,
FileBinarizer,
VocabularyDatasetBinarizer,
)
from fairseq.data import Dictionary
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
#####################################################################
# file name tools
#####################################################################
def _train_path(lang, trainpref):
return "{}{}".format(trainpref, ("." + lang) if lang else "")
def _file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def _dest_path(prefix, lang, destdir):
return os.path.join(destdir, _file_name(prefix, lang))
def _dict_path(lang, destdir):
return _dest_path("dict", lang, destdir) + ".txt"
def dataset_dest_prefix(args, output_prefix, lang):
base = os.path.join(args.destdir, output_prefix)
if lang is not None:
lang_part = f".{args.source_lang}-{args.target_lang}.{lang}"
elif args.only_source:
lang_part = ""
else:
lang_part = f".{args.source_lang}-{args.target_lang}"
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
return "{}.{}".format(dataset_dest_prefix(args, output_prefix, lang), extension)
#####################################################################
# dictionary tools
#####################################################################
def _build_dictionary(
filenames,
task,
args,
src=False,
tgt=False,
):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
#####################################################################
# bin file creation logic
#####################################################################
def _make_binary_dataset(
vocab: Dictionary,
input_prefix: str,
output_prefix: str,
lang: tp.Optional[str],
num_workers: int,
args: Namespace,
):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
binarizer = VocabularyDatasetBinarizer(
vocab,
append_eos=True,
)
input_file = "{}{}".format(input_prefix, ("." + lang) if lang is not None else "")
full_output_prefix = dataset_dest_prefix(args, output_prefix, lang)
final_summary = FileBinarizer.multiprocess_dataset(
input_file,
args.dataset_impl,
binarizer,
full_output_prefix,
vocab_size=len(vocab),
num_workers=num_workers,
)
logger.info(f"[{lang}] {input_file}: {final_summary} (by {vocab.unk_word})")
def _make_binary_alignment_dataset(
input_prefix: str, output_prefix: str, num_workers: int, args: Namespace
):
binarizer = AlignmentDatasetBinarizer(utils.parse_alignment)
input_file = input_prefix
full_output_prefix = dataset_dest_prefix(args, output_prefix, lang=None)
final_summary = FileBinarizer.multiprocess_dataset(
input_file,
args.dataset_impl,
binarizer,
full_output_prefix,
vocab_size=None,
num_workers=num_workers,
)
logger.info(
"[alignments] {}: parsed {} alignments".format(
input_file, final_summary.num_seq
)
)
#####################################################################
# routing logic
#####################################################################
def _make_dataset(
vocab: Dictionary,
input_prefix: str,
output_prefix: str,
lang: tp.Optional[str],
args: Namespace,
num_workers: int,
):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = _dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
args.destdir,
)
shutil.copyfile(_file_name(input_prefix, lang), output_text_file)
else:
_make_binary_dataset(
vocab, input_prefix, output_prefix, lang, num_workers, args
)
def _make_all(lang, vocab, args):
if args.trainpref:
_make_dataset(
vocab, args.trainpref, "train", lang, args=args, num_workers=args.workers
)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
_make_dataset(
vocab, validpref, outprefix, lang, args=args, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
_make_dataset(
vocab, testpref, outprefix, lang, args=args, num_workers=args.workers
)
def _make_all_alignments(args):
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
args=args,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
args=args,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
args=args,
)
#####################################################################
# align
#####################################################################
def _align_files(args, src_dict, tgt_dict):
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = _train_path(args.source_lang, args.trainpref)
tgt_file_name = _train_path(args.target_lang, args.trainpref)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
#####################################################################
# MAIN
#####################################################################
def main(args):
# setup some basic things
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
assert (
args.dataset_impl != "huffman"
), "preprocessing.py doesn't support Huffman yet, use HuffmanCodeBuilder directly."
# build dictionaries
target = not args.only_source
if not args.srcdict and os.path.exists(_dict_path(args.source_lang, args.destdir)):
raise FileExistsError(_dict_path(args.source_lang, args.destdir))
if (
target
and not args.tgtdict
and os.path.exists(_dict_path(args.target_lang, args.destdir))
):
raise FileExistsError(_dict_path(args.target_lang, args.destdir))
task = tasks.get_task(args.task)
    assert task.__name__ == "TranslationDATTask", (
        "datpreprocess only supports TranslationDATTask; "
        'please specify "--task translation_dat_task" in your script'
    )
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict, args.seg_tokens)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict, args.seg_tokens)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = _build_dictionary(
{
_train_path(lang, args.trainpref)
for lang in [args.source_lang, args.target_lang]
},
task=task,
args=args,
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict, args.seg_tokens)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = _build_dictionary(
[_train_path(args.source_lang, args.trainpref)],
task=task,
args=args,
src=True,
)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict, args.seg_tokens)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = _build_dictionary(
[_train_path(args.target_lang, args.trainpref)],
task=task,
args=args,
tgt=True,
)
else:
tgt_dict = None
# save dictionaries
src_dict.save(_dict_path(args.source_lang, args.destdir))
if target and tgt_dict is not None:
tgt_dict.save(_dict_path(args.target_lang, args.destdir))
if args.dict_only:
return
_make_all(args.source_lang, src_dict, args)
if target:
_make_all(args.target_lang, tgt_dict, args)
# align the datasets if needed
if args.align_suffix:
_make_all_alignments(args)
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
_align_files(args, src_dict=src_dict, tgt_dict=tgt_dict)
def cli_main():
parser = options.get_preprocessing_parser()
    parser.add_argument("--seg-tokens", type=int, default=0,
                        help="Number of special tokens reserved for segment ids. "
                             "If you are using pre-trained checkpoints, please set this value to 32.")
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/eval_lm.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import sys
from argparse import Namespace
from typing import Iterable, List, Optional
import torch
from omegaconf import DictConfig
import fairseq
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter
from fairseq.sequence_scorer import SequenceScorer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.eval_lm")
def eval_lm(
models: List[fairseq.models.FairseqModel],
source_dictionary: fairseq.data.Dictionary,
batch_iterator: Iterable,
post_process: Optional[str] = None,
output_word_probs: bool = False,
output_word_stats: bool = False,
target_dictionary: Optional[fairseq.data.Dictionary] = None,
softmax_batch: int = 0,
remove_bos_token: bool = False,
device: Optional[torch.device] = None,
):
"""
Args:
models (List[~fairseq.models.FairseqModel]): list of models to
evaluate. Models are essentially `nn.Module` instances, but
must be compatible with fairseq's `SequenceScorer`.
source_dictionary (~fairseq.data.Dictionary): dictionary for
applying any relevant post processing or outputing word
probs/stats.
batch_iterator (Iterable): yield batches of data
post_process (Optional[str]): post-process text by removing BPE,
letter segmentation, etc. Valid options can be found in
fairseq.data.utils.post_process, although not all options
are implemented here.
output_word_probs (Optional[bool]): output words and their
predicted log probabilities
output_word_stats (Optional[bool]): output word statistics such
as word count and average probability
target_dictionary (Optional[~fairseq.data.Dictionary]): output
dictionary (defaults to *source_dictionary*)
softmax_batch (Optional[bool]): if BxT is more than this, will
batch the softmax over vocab to this amount of tokens, in
order to fit into GPU memory
remove_bos_token (Optional[bool]): if True, confirm that the
first token is the beginning-of-sentence symbol (according
to the relevant dictionary) and remove it from the output
device (Optional[torch.device]): device to use for evaluation
(defaults to device of first model parameter)
"""
if target_dictionary is None:
target_dictionary = source_dictionary
if device is None:
device = next(models[0].parameters()).device
gen_timer = StopwatchMeter()
scorer = SequenceScorer(target_dictionary, softmax_batch)
score_sum = 0.0
count = 0
if post_process is not None:
if post_process in {"subword_nmt", "@@ "}:
bpe_cont = post_process.rstrip()
bpe_toks = {
i
for i in range(len(source_dictionary))
if source_dictionary[i].endswith(bpe_cont)
}
else:
            raise NotImplementedError(
                f"--post-process={post_process} is not implemented"
            )
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
for sample in batch_iterator:
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample, device=device)
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample["ntokens"])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample["id"][i]
tokens = hypo["tokens"]
tgt_len = tokens.numel()
pos_scores = hypo["positional_scores"].float()
if remove_bos_token:
assert hypo["tokens"][0].item() == target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf"))
if inf_scores.any():
                logger.info(
                    "skipping tokens with inf scores: %s",
                    target_dictionary.string(tokens[inf_scores.nonzero()]),
                )
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if output_word_probs or output_word_stats:
w = ""
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(
pos_scores[i].item(), next_prob
)
is_bpe = False
w = ""
if output_word_probs:
logger.info(
str(int(sample_id))
+ " "
+ (
"\t".join(
"{} [{:2f}]".format(x[0], x[1]) for x in word_prob
)
)
)
avg_nll_loss = (
-score_sum / count / math.log(2) if count > 0 else 0
) # convert to base 2
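    # avg_nll_loss is measured in bits per token (log base 2), so perplexity is
    # reported below as 2 ** loss rather than e ** loss.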
logger.info(
"Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format(
gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0
)
)
if output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
return {
"loss": avg_nll_loss,
"perplexity": 2**avg_nll_loss,
}
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
"""increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
        or it might not be counted because it is not an ending subword unit,
also keeps track of how many of those we have seen"""
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return "{}\t{}\t{}\t{}\t{}\t{}".format(
self.word,
self.count,
self.log_prob,
self.is_bpe,
self.next_word_prob,
self.count - self.missing_next_words,
)
def main(cfg: DictConfig, **unused_kwargs):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
logger.info(cfg)
if cfg.eval_lm.context_window > 0:
# reduce tokens per sample by the required context window size
cfg.task.tokens_per_sample -= cfg.eval_lm.context_window
# Initialize the task using the current *cfg*
task = tasks.setup_task(cfg.task)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=eval(cfg.common_eval.model_overrides),
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
task=task,
)
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
# Optimize ensemble for generation and set the source and dest dicts on the model
# (required by scorer)
for model in models:
if use_fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
assert len(models) > 0
logger.info(
"num. model params: {:,}".format(sum(p.numel() for p in models[0].parameters()))
)
# Load dataset splits
task.load_dataset(cfg.dataset.gen_subset)
dataset = task.dataset(cfg.dataset.gen_subset)
logger.info(
"{} {} {:,} examples".format(
cfg.task.data, cfg.dataset.gen_subset, len(dataset)
)
)
itr = task.eval_lm_dataloader(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens or 36000,
batch_size=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
*[model.max_positions() for model in models]
),
num_shards=max(
cfg.dataset.num_shards,
cfg.distributed_training.distributed_world_size,
),
shard_id=max(
cfg.dataset.shard_id,
cfg.distributed_training.distributed_rank,
),
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
context_window=cfg.eval_lm.context_window,
)
itr = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
results = eval_lm(
models=models,
source_dictionary=task.source_dictionary,
batch_iterator=itr,
post_process=cfg.common_eval.post_process,
output_word_probs=cfg.eval_lm.output_word_probs,
output_word_stats=cfg.eval_lm.output_word_stats,
target_dictionary=task.target_dictionary,
softmax_batch=cfg.eval_lm.softmax_batch,
remove_bos_token=getattr(cfg.task, "add_bos_token", False),
)
logger.info(
"Loss (base 2): {:.4f}, Perplexity: {:.2f}".format(
results["loss"], results["perplexity"]
)
)
return results
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/fastgenerate.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import inspect
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from collections import namedtuple
ConcurrentTask = namedtuple("ConcurrentTask", ['hypos', 'sample'])
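# A queued ConcurrentTask pairs a decoding result (possibly a still-running future when
# overlapped decoding is used) with its CPU-side sample, so detokenization and printing
# can overlap with GPU decoding of later batches.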
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
# gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
task_queue = [] # task queue for overlapped decoding
whole_timer = StopwatchMeter()
whole_timer.start()
for nowidx, sample in enumerate(progress):
sample_cpu = sample
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
        if "allow_future" in inspect.getfullargspec(task.inference_step).args:
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
allow_future=True
)
else:
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
task_queue.append(ConcurrentTask(hypos, sample_cpu))
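        # Drain pending decoding tasks. Each queued item holds either a plain
        # list of hypotheses (synchronous decoding) or a future (overlapped
        # decoding). A future is collected from the head of the queue once it
        # has finished, or once the queue has grown to max_wait entries; on the
        # last batch max_wait is 0, so every remaining task is drained here.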
while task_queue:
max_wait = 0 if nowidx == len(progress) - 1 else 30
if not isinstance(task_queue[0].hypos, list): # concurrent task
if not task_queue[0].hypos.future.done() and len(task_queue) < max_wait:
break # process next task
else:
                    concurrent_task = task_queue.pop(0)  # pop the head, matching the task_queue[0] check above
hypos = concurrent_task.hypos
sample = concurrent_task.sample
hypos_result = hypos.future.result()
for fn, args in zip(hypos.fn, hypos.args):
hypos_result = fn(hypos_result, *args)
hypos = hypos_result
else:
                concurrent_task = task_queue.pop(0)
hypos = concurrent_task.hypos
sample = concurrent_task.sample
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
for i, sample_id in enumerate(sample["id"].tolist()):
hypo_str = tgt_dict.string(
hypos[i][0]['tokens'].int().cpu(), cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator)
)
print("H-{}\t0.00\t{}".format(sample_id, hypo_str), file=output_file,)
continue
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
whole_timer.stop(1)
logger.info(
"All process finished in {:.1f}s".format(
whole_timer.sum,
)
)
return scorer
def cli_main():
parser = options.get_generation_parser()
# TODO: replace this workaround with refactoring of `AudioPretraining`
# parser.add_argument(
# "--arch",
# "-a",
# metavar="ARCH",
# default="wav2vec2",
# help="Model architecture. For constructing tasks that rely on "
# "model args (e.g. `AudioPretraining`)",
# )
import argparse
debug_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
debug_parser.add_argument("--debug", action="store_true")
debug_args, left_args = debug_parser.parse_known_args()
if debug_args.debug:
import debugpy
debugpy.listen(("0.0.0.0", 5679))
logging.info("wait debug")
debugpy.wait_for_client()
args = options.parse_args_and_arch(parser, input_args=left_args)
main(args)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/generate.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
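
A minimal usage sketch (paths and settings here are illustrative placeholders,
not taken from this repository):

    fairseq-generate data-bin/wmt17_de_en --path checkpoints/checkpoint_best.pt --batch-size 64 --beam 5 --remove-bpe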
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import inspect
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
warn_fastgenerate = False
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
        if not warn_fastgenerate and "allow_future" in inspect.getfullargspec(task.inference_step).args \
and getattr(models[0].args, "decode_max_workers", 0) >= 1 \
and getattr(models[0].args, "decode_strategy", None) == "beamsearch":
logging.warn("You should use fairseq-fastgenerate instead of faiseq-generate to enable overlapped decoding !")
warn_fastgenerate = True
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(
target_tokens,
cfg.common_eval.post_process,
escape_unk=True,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(
generator
),
)
src_str = decode_fn(src_str)
if has_target:
target_str = decode_fn(target_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
if not cfg.common_eval.quiet:
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print(
"H-{}\t{}\t{}".format(sample_id, score, hypo_str),
file=output_file,
)
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
print(
"P-{}\t{}".format(
sample_id,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"]
.div_(math.log(2))
.tolist(),
)
),
),
file=output_file,
)
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[",".join(src_probs) for src_probs in alignment]
),
),
file=output_file,
)
if cfg.generation.print_step:
print(
"I-{}\t{}".format(sample_id, hypo["steps"]),
file=output_file,
)
if cfg.generation.retain_iter_history:
for step, h in enumerate(hypo["history"]):
_, h_str, _ = utils.post_process_prediction(
hypo_tokens=h["tokens"].int().cpu(),
src_str=src_str,
alignment=None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=None,
)
print(
"E-{}_{}\t{}".format(sample_id, step, h_str),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if (
align_dict is not None
or cfg.common_eval.post_process is not None
):
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(
target_str, add_if_not_exist=True
)
hypo_tokens = tgt_dict.encode_line(
detok_hypo_str, add_if_not_exist=True
)
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer
def cli_main():
parser = options.get_generation_parser()
# TODO: replace this workaround with refactoring of `AudioPretraining`
# parser.add_argument(
# "--arch",
# "-a",
# metavar="ARCH",
# default="wav2vec2",
# help="Model architecture. For constructing tasks that rely on "
# "model args (e.g. `AudioPretraining`)",
# )
import argparse
debug_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
debug_parser.add_argument("--debug", action="store_true")
debug_args, left_args = debug_parser.parse_known_args()
if debug_args.debug:
import debugpy
debugpy.listen(("0.0.0.0", 5679))
logging.info("wait debug")
debugpy.wait_for_client()
args = options.parse_args_and_arch(parser, input_args=left_args)
main(args)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/hydra_train.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
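# A minimal usage sketch (config directory, config name and overrides are
# placeholders); this entry point is exposed as fairseq-hydra-train and accepts
# Hydra-style overrides:
#   fairseq-hydra-train --config-dir /path/to/configs --config-name my_config task.data=/path/to/data-bin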
import logging
import os
import hydra
import torch
from hydra.core.hydra_config import HydraConfig
from omegaconf import OmegaConf, open_dict
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq.dataclass.utils import omegaconf_no_object_check
from fairseq.utils import reset_logging
from fairseq_cli.train import main as pre_main
logger = logging.getLogger("fairseq_cli.hydra_train")
@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
_hydra_main(cfg)
def _hydra_main(cfg: FairseqConfig, **kwargs) -> float:
add_defaults(cfg)
if cfg.common.reset_logging:
reset_logging() # Hydra hijacks logging, fix that
else:
# check if directly called or called through hydra_main
if HydraConfig.initialized():
with open_dict(cfg):
                # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
with omegaconf_no_object_check():
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
)
OmegaConf.set_struct(cfg, True)
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, pre_main, **kwargs)
else:
distributed_utils.call_main(cfg, pre_main, **kwargs)
except BaseException as e:
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! " + str(e))
# get best val and return - useful for sweepers
try:
best_val = metrics.get_smoothed_value(
"valid", cfg.checkpoint.best_checkpoint_metric
)
except:
best_val = None
if best_val is None:
best_val = float("inf")
return best_val
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
hydra_init(cfg_name)
hydra_main()
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/interactive.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
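
A minimal usage sketch (paths are placeholders); input is read from stdin or
from the file given via --input:

    cat source.de | fairseq-interactive data-bin/wmt17_de_en --path checkpoints/checkpoint_best.pt --beam 5 --buffer-size 32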
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, cfg, task, max_positions, encode_fn):
def encode_fn_target(x):
return encode_fn(x)
if cfg.generation.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [
task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
)
for constraint in constraint_list
]
if cfg.generation.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(
tokens, lengths, constraints=constraints_tensor
),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch["id"]
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
start_time = time.time()
total_translate_time = 0
utils.import_user_module(cfg.common)
if cfg.interactive.buffer_size < 1:
cfg.interactive.buffer_size = 1
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.batch_size = 1
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
not cfg.dataset.batch_size
or cfg.dataset.batch_size <= cfg.interactive.buffer_size
), "--batch-size cannot be larger than --buffer-size"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Setup task, e.g., translation
task = tasks.setup_task(cfg.task)
# Load ensemble
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
if cfg.generation.constraints:
logger.warning(
"NOTE: Constrained decoding currently assumes a shared subword vocabulary."
)
if cfg.interactive.buffer_size > 1:
logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Type the input sentence and press return:")
start_id = 0
for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
results = []
for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(
generator, models, sample, constraints=constraints
)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if cfg.generation.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append(
(
start_id + id,
src_tokens_i,
hypos,
{
"constraints": constraints,
"time": translate_time / len(translations),
},
)
)
# sort output to match input order
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
src_str = ""
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
print("S-{}\t{}".format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print(
"C-{}\t{}".format(
id_,
tgt_dict.string(constraint, cfg.common_eval.post_process),
)
)
# Process top predictions
for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print("H-{}\t{}\t{}".format(id_, score, hypo_str))
# detokenized hypothesis
print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
print(
"P-{}\t{}".format(
id_,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"].div_(math.log(2)).tolist(),
)
),
)
)
if cfg.generation.print_alignment:
alignment_str = " ".join(
["{}-{}".format(src, tgt) for src, tgt in alignment]
)
print("A-{}\t{}".format(id_, alignment_str))
# update running id_ counter
start_id += len(inputs)
logger.info(
"Total time: {:.3f} seconds; translation time: {:.3f}".format(
time.time() - start_time, total_translate_time
)
)
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/preprocess.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
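
A minimal usage sketch (paths are placeholders):

    fairseq-preprocess --source-lang de --target-lang en --trainpref data/train --validpref data/valid --testpref data/test --destdir data-bin/wmt17_de_en --workers 8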
"""
import logging
import os
import shutil
import sys
import typing as tp
from argparse import Namespace
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.binarizer import (
AlignmentDatasetBinarizer,
FileBinarizer,
VocabularyDatasetBinarizer,
)
from fairseq.data import Dictionary
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
#####################################################################
# file name tools
#####################################################################
def _train_path(lang, trainpref):
return "{}{}".format(trainpref, ("." + lang) if lang else "")
def _file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def _dest_path(prefix, lang, destdir):
return os.path.join(destdir, _file_name(prefix, lang))
def _dict_path(lang, destdir):
return _dest_path("dict", lang, destdir) + ".txt"
def dataset_dest_prefix(args, output_prefix, lang):
base = os.path.join(args.destdir, output_prefix)
if lang is not None:
lang_part = f".{args.source_lang}-{args.target_lang}.{lang}"
elif args.only_source:
lang_part = ""
else:
lang_part = f".{args.source_lang}-{args.target_lang}"
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
return "{}.{}".format(dataset_dest_prefix(args, output_prefix, lang), extension)
#####################################################################
# dictionary tools
#####################################################################
def _build_dictionary(
filenames,
task,
args,
src=False,
tgt=False,
):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
#####################################################################
# bin file creation logic
#####################################################################
def _make_binary_dataset(
vocab: Dictionary,
input_prefix: str,
output_prefix: str,
lang: tp.Optional[str],
num_workers: int,
args: Namespace,
):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
binarizer = VocabularyDatasetBinarizer(
vocab,
append_eos=True,
)
input_file = "{}{}".format(input_prefix, ("." + lang) if lang is not None else "")
full_output_prefix = dataset_dest_prefix(args, output_prefix, lang)
final_summary = FileBinarizer.multiprocess_dataset(
input_file,
args.dataset_impl,
binarizer,
full_output_prefix,
vocab_size=len(vocab),
num_workers=num_workers,
)
logger.info(f"[{lang}] {input_file}: {final_summary} (by {vocab.unk_word})")
def _make_binary_alignment_dataset(
input_prefix: str, output_prefix: str, num_workers: int, args: Namespace
):
binarizer = AlignmentDatasetBinarizer(utils.parse_alignment)
input_file = input_prefix
full_output_prefix = dataset_dest_prefix(args, output_prefix, lang=None)
final_summary = FileBinarizer.multiprocess_dataset(
input_file,
args.dataset_impl,
binarizer,
full_output_prefix,
vocab_size=None,
num_workers=num_workers,
)
logger.info(
"[alignments] {}: parsed {} alignments".format(
input_file, final_summary.num_seq
)
)
#####################################################################
# routing logic
#####################################################################
def _make_dataset(
vocab: Dictionary,
input_prefix: str,
output_prefix: str,
lang: tp.Optional[str],
args: Namespace,
num_workers: int,
):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = _dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
args.destdir,
)
shutil.copyfile(_file_name(input_prefix, lang), output_text_file)
else:
_make_binary_dataset(
vocab, input_prefix, output_prefix, lang, num_workers, args
)
def _make_all(lang, vocab, args):
if args.trainpref:
_make_dataset(
vocab, args.trainpref, "train", lang, args=args, num_workers=args.workers
)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
_make_dataset(
vocab, validpref, outprefix, lang, args=args, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
_make_dataset(
vocab, testpref, outprefix, lang, args=args, num_workers=args.workers
)
def _make_all_alignments(args):
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
args=args,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
args=args,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
_make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
args=args,
)
#####################################################################
# align
#####################################################################
def _align_files(args, src_dict, tgt_dict):
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = _train_path(args.source_lang, args.trainpref)
tgt_file_name = _train_path(args.target_lang, args.trainpref)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
#####################################################################
# MAIN
#####################################################################
def main(args):
# setup some basic things
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
assert (
args.dataset_impl != "huffman"
), "preprocessing.py doesn't support Huffman yet, use HuffmanCodeBuilder directly."
# build dictionaries
target = not args.only_source
if not args.srcdict and os.path.exists(_dict_path(args.source_lang, args.destdir)):
raise FileExistsError(_dict_path(args.source_lang, args.destdir))
if (
target
and not args.tgtdict
and os.path.exists(_dict_path(args.target_lang, args.destdir))
):
raise FileExistsError(_dict_path(args.target_lang, args.destdir))
task = tasks.get_task(args.task)
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = _build_dictionary(
{
_train_path(lang, args.trainpref)
for lang in [args.source_lang, args.target_lang]
},
task=task,
args=args,
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = _build_dictionary(
[_train_path(args.source_lang, args.trainpref)],
task=task,
args=args,
src=True,
)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = _build_dictionary(
[_train_path(args.target_lang, args.trainpref)],
task=task,
args=args,
tgt=True,
)
else:
tgt_dict = None
# save dictionaries
src_dict.save(_dict_path(args.source_lang, args.destdir))
if target and tgt_dict is not None:
tgt_dict.save(_dict_path(args.target_lang, args.destdir))
if args.dict_only:
return
_make_all(args.source_lang, src_dict, args)
if target:
_make_all(args.target_lang, tgt_dict, args)
# align the datasets if needed
if args.align_suffix:
_make_all_alignments(args)
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
_align_files(args, src_dict=src_dict, tgt_dict=tgt_dict)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/score.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BLEU scoring of generated translations against reference translations.
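
A minimal usage sketch (file names are placeholders); the system output can
also be piped in on stdin, since --sys defaults to "-":

    fairseq-score --sys generate.sys.txt --ref generate.ref.txt --order 4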
"""
import argparse
import os
import sys
from fairseq.data import dictionary
from fairseq.scoring import bleu
def get_parser():
parser = argparse.ArgumentParser(
description="Command-line script for BLEU scoring."
)
# fmt: off
parser.add_argument('-s', '--sys', default='-', help='system output')
parser.add_argument('-r', '--ref', required=True, help='references')
parser.add_argument('-o', '--order', default=4, metavar='N',
type=int, help='consider ngrams up to this order')
parser.add_argument('--ignore-case', action='store_true',
help='case-insensitive scoring')
parser.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
parser.add_argument('--sentence-bleu', action='store_true',
help='report sentence-level BLEUs (i.e., with +1 smoothing)')
# fmt: on
return parser
def cli_main():
parser = get_parser()
args = parser.parse_args()
print(args)
assert args.sys == "-" or os.path.exists(
args.sys
), "System output file {} does not exist".format(args.sys)
assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref)
dict = dictionary.Dictionary()
def readlines(fd):
for line in fd.readlines():
if args.ignore_case:
yield line.lower()
else:
yield line
if args.sacrebleu:
import sacrebleu
def score(fdsys):
with open(args.ref) as fdref:
print(sacrebleu.corpus_bleu(fdsys, [fdref]).format())
elif args.sentence_bleu:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
for i, (sys_tok, ref_tok) in enumerate(
zip(readlines(fdsys), readlines(fdref))
):
scorer.reset(one_init=True)
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(i, scorer.result_string(args.order))
else:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(
bleu.BleuConfig(
pad=dict.pad(),
eos=dict.eos(),
unk=dict.unk(),
)
)
for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(scorer.result_string(args.order))
if args.sys == "-":
score(sys.stdin)
else:
with open(args.sys, "r") as f:
score(f)
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/train.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
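
A minimal usage sketch (paths and hyper-parameters are placeholders, not a
recommended recipe):

    fairseq-train data-bin/wmt17_de_en --arch transformer --optimizer adam --lr 0.0005 --max-tokens 4096 --save-dir checkpoints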
"""
import argparse
import logging
import math
import os
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
# We need to setup root logger before importing any fairseq libraries.
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils
from fairseq.data import data_utils, iterators
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.initialize import add_defaults
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
def main(cfg: FairseqConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
add_defaults(cfg)
if (
distributed_utils.is_master(cfg.distributed_training)
and "job_logging_cfg" in cfg
):
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
if cfg.common.log_file is not None:
handler = logging.FileHandler(filename=cfg.common.log_file)
logger.addHandler(handler)
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print args
logger.info(cfg)
if cfg.checkpoint.write_checkpoints_asynchronously:
try:
import iopath # noqa: F401
except ImportError:
logging.exception(
"Asynchronous checkpoint writing is specified but iopath is "
"not installed: `pip install iopath`"
)
return
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
if cfg.distributed_training.ddp_backend == "fully_sharded":
with fsdp_enable_wrap(cfg.distributed_training):
model = fsdp_wrap(task.build_model(cfg.model))
else:
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("criterion: {}".format(criterion.__class__.__name__))
logger.info(
"num. shared model params: {:,} (num. trained: {:,})".format(
sum(
p.numel() for p in model.parameters() if not getattr(p, "expert", False)
),
sum(
p.numel()
for p in model.parameters()
if not getattr(p, "expert", False) and p.requires_grad
),
)
)
logger.info(
"num. expert model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)),
sum(
p.numel()
for p in model.parameters()
if getattr(p, "expert", False) and p.requires_grad
),
)
)
# Load valid dataset (we load training data below, based on the latest checkpoint)
# We load the valid dataset AFTER building the model
data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
if cfg.dataset.combine_valid_subsets:
task.load_dataset("valid", combine=True, epoch=1)
else:
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per device = {} and max sentences per device = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
if cfg.common.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("load_checkpoint") # wait for all workers
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
if lr <= cfg.optimization.stop_min_lr:
logger.info(
f"stopping training because current learning rate ({lr}) is smaller "
"than or equal to minimum learning rate "
f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
)
break
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
# ioPath implementation to wait for all asynchronous file writes to complete.
if cfg.checkpoint.write_checkpoints_asynchronously:
logger.info(
"ioPath PathManager waiting for all asynchronous checkpoint "
"writes to finish."
)
PathManager.async_close()
logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
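    # The best validation value and the count of non-improving validations are
    # stored as attributes on this function object, so they persist across
    # calls within a single training run.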
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(
itr,
update_freq,
skip_remainder_batch=cfg.optimization.skip_remainder_batch,
)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_file=cfg.common.log_file,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
azureml_logging=(
cfg.common.azureml_logging
if distributed_utils.is_master(cfg.distributed_training)
else False
),
)
progress.update_config(_flatten_config(cfg))
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
# remove any legacy Namespaces and replace with a single "args"
namespace = None
for k, v in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if namespace is not None:
config["args"] = vars(namespace)
return config
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
training_time_hours = trainer.cumulative_training_time() / (60 * 60)
if (
cfg.optimization.stop_time_hours > 0
and training_time_hours > cfg.optimization.stop_time_hours
):
should_stop = True
logger.info(
f"Stopping training due to "
f"cumulative_training_time: {training_time_hours} > "
f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or should_stop
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or should_stop
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
)
and not cfg.dataset.disable_validation
and num_updates >= cfg.dataset.validate_after_updates
)
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(cfg, valid_losses[0])
# Save checkpoint
if do_save or should_stop:
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(
shuffle=False, set_dataset_epoch=False # use a fixed valid set
)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for i, sample in enumerate(progress):
if (
cfg.dataset.max_valid_steps is not None
and i > cfg.dataset.max_valid_steps
):
break
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
if hasattr(task, "post_validate"):
task.post_validate(trainer.get_model(), stats, agg)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
cfg = convert_namespace_to_omegaconf(args)
if cfg.common.use_plasma_view:
server = PlasmaStore(path=cfg.common.plasma_path)
logger.info(
f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}"
)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
# if cfg.common.use_plasma_view:
# server.server.kill()
if __name__ == "__main__":
cli_main()
# File: DA-Transformer-main/fairseq_cli/validate.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
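# A minimal usage sketch (paths are placeholders); this entry point is exposed
# as fairseq-validate and reports validation loss for a trained checkpoint:
#   fairseq-validate data-bin/wmt17_de_en --path checkpoints/checkpoint_best.pt --valid-subset valid --max-tokens 4096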
import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from fairseq.utils import reset_logging
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.validate")
def main(cfg: DictConfig, override_args=None):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
reset_logging()
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
else:
overrides = None
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=overrides,
suffix=cfg.checkpoint.checkpoint_suffix,
)
model = models[0]
# Move models to GPU
for model in models:
model.eval()
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(saved_cfg)
# Build criterion
criterion = task.build_criterion(saved_cfg.criterion)
criterion.eval()
for subset in cfg.dataset.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if data_parallel_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=cfg.common.all_gather_list_size,
group=distributed_utils.get_data_parallel_group(),
)
log_outputs = list(chain.from_iterable(log_outputs))
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
distributed_utils.call_main(
convert_namespace_to_omegaconf(args), main, override_args=override_args
)
if __name__ == "__main__":
cli_main()
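# Illustrative usage (hypothetical paths and arguments, not taken from this repository's docs):
#   python fairseq_cli/validate.py data-bin/my_dataset --path checkpoints/checkpoint_best.pt \
#       --valid-subset valid --max-tokens 4096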
| 5,228 | 32.954545 | 88 | py |
null | DA-Transformer-main/fs_plugins/__init__.py | from .criterions import *
from .models import *
from .tasks import *
from .optimizer import *
print("fairseq plugins loaded...") | 129 | 20.666667 | 34 | py |
null | DA-Transformer-main/fs_plugins/criterions/__init__.py | import os
import importlib
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fs_plugins.criterions." + file_name)
| 321 | 34.777778 | 69 | py |
null | DA-Transformer-main/fs_plugins/criterions/nat_dag_loss.py | ##########################################################################
# Copyright (C) 2022 COAI @ Tsinghua University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import math
import re
import logging
from functools import reduce
import numpy as np
from typing import Union, Tuple, Optional
import sys
import torch
from torch import Tensor
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from torch.autograd import Function
from ..custom_ops import dag_loss, dag_best_alignment, dag_logsoftmax_gather_inplace, torch_dag_loss, torch_dag_best_alignment, torch_dag_logsoftmax_gather_inplace
from .utilities import parse_anneal_argument, get_anneal_value
logger = logging.getLogger(__name__)
@register_criterion("nat_dag_loss")
class NATDAGLoss(FairseqCriterion):
def __init__(self, cfg, task):
super().__init__(task)
self.cfg = cfg
assert cfg.label_smoothing == 0, "DAT loss does not support label smoothing for now"
self.glance_strategy = cfg.glance_strategy
self._glat_p_anneal_params = parse_anneal_argument(cfg.glat_p)
self.set_update_num(0)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--label-smoothing", type=float, default=0,
help="Set the label smoothing value. DA-Transformer does not use label smoothing for now.")
parser.add_argument("--glat-p", type=str, default="0",
help="Set the glancing probability and its annealing schedule. For example, '0.5:0.1@200k' indicates annealing probability from 0.5 to 0.1 in 200k steps.")
parser.add_argument("--glance-strategy", type=str, default=None, help='Set the glancing strategy. Possible values: "number-random" or "None" or "CMLM"')
parser.add_argument("--no-force-emit", action="store_true", help="If true, the position of glanced tokens in the second forward pass will not be fixed.")
parser.add_argument("--use-pretrain-loss", action="store_true", help="If true, use the pre-training loss, i.e. the position of segment id will be fixed.")
parser.add_argument("--torch-dag-logsoftmax-gather", action="store_true",
help="Use torch native implementation for logsoftmax-gather. It may be slower and consume more GPU memory.")
parser.add_argument("--torch-dag-best-alignment", action="store_true",
help="Use torch native implementation for dag-best-alignment. It may be slower and consume more GPU memory.")
parser.add_argument("--torch-dag-loss", action="store_true",
help="Use torch native implementation for dag-loss. It may be slower and consume more GPU memory.")
def _compute_loss(self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
        policy_logprob: if there is some policy that
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = utils.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss_nofactor = loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor, "ntokens": outputs.shape[0], "loss_nofactor": loss_nofactor}
def _force_aligment(self, match, force_emit):
# force alignment by manipulating the tensor "match"
batch_size, tarlen, prelen = match.shape
matchmask = torch.zeros(batch_size, tarlen + 1, prelen, dtype=torch.bool, device=match.device).\
scatter_(1, force_emit.unsqueeze(1) + 1, 1)[:, 1:]
target_mask = matchmask.sum(dim=-1, keepdim=True) > 0
match = match.masked_fill(target_mask, 0) + torch.zeros_like(match).masked_fill_(~matchmask, float("-inf")).masked_fill_(~target_mask, 0)
        # if output[j] is forced to align with target[i], then: (1) match[i][j]:=0 and (2) for all k!=j, match[i][k]:="-inf"
# the final loss will be the sum of the losses on all fragments
return match
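    # Illustrative example (assumed values): with force_emit = [-1, 2, -1, ...], vertex 1 is forced to emit
    # target token 2, so row match[:, 2, :] becomes -inf everywhere except column 1 (which is set to 0);
    # the DAG loss then decomposes into independent losses over the fragments separated by the forced vertices.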
@torch.no_grad()
def _brute_force_fragment_sum(self, match, force_emit, links):
# a function for debugging. Use a brute force method to calculate the sum of loss on all fragments
match = match.clone()
force_emit = force_emit.clone()
links = links.clone()
batch_size, tarlen, prelen = match.shape
res = torch.zeros(batch_size, dtype=torch.float32, device=match.device)
for i in range(batch_size):
force_emit_sample = force_emit[i].cpu()
lastj = 0
lasttarj = 0
for j in range(1, prelen):
if force_emit_sample[j] >= 0 or j == prelen - 1:
if j == prelen - 1:
nowj = j
nowtarj = tarlen - 1
else:
nowj = j
nowtarj = force_emit_sample[j]
match_extract = match[i:i+1, lasttarj:nowtarj+1, lastj:nowj+1]
links_extract = links[i:i+1, lastj:nowj+1, lastj:nowj+1]
target_length = torch.tensor([match_extract.shape[1]])
output_length = torch.tensor([match_extract.shape[2]])
res[i] += torch_dag_loss(match_extract, links_extract, output_length, target_length)[0] - match_extract[0, 0, 0] - match_extract[0, -1, -1]
lastj = nowj
lasttarj = nowtarj
return res
def _compute_dag_loss(self, outputs, output_masks, targets, target_masks, links, label_smoothing=0.0, name="loss",
factor=1.0, force_emit=None, model=None):
batch_size = outputs.shape[0]
prelen = outputs.shape[1]
tarlen = targets.shape[1]
output_length = output_masks.sum(dim=-1)
target_length = target_masks.sum(dim=-1)
if self.cfg.torch_dag_logsoftmax_gather:
outputs, match = torch_dag_logsoftmax_gather_inplace(outputs, targets.unsqueeze(1).expand(-1, prelen, -1))
else:
outputs, match = dag_logsoftmax_gather_inplace(outputs, targets.unsqueeze(1).expand(-1, prelen, -1))
match = match.transpose(1, 2)
# verify the loss sum on all fragments is correct
# assert model.args.max_transition_length != -1
# tmp = self._brute_force_fragment_sum(match, force_emit, model.restore_valid_links(links))
# if matchmask is not None and not self.cfg.no_force_emit:
# glat_prev_mask = keep_word_mask.unsqueeze(1)
# match = match.masked_fill(glat_prev_mask, 0) + match.masked_fill(~matchmask, float("-inf")).masked_fill(~glat_prev_mask, 0).detach()
if force_emit is not None:
match = self._force_aligment(match, force_emit)
nvalidtokens = output_masks.sum()
if self.cfg.torch_dag_loss:
if model.args.max_transition_length != -1:
links = model.restore_valid_links(links)
loss_result = torch_dag_loss(match, links, output_length, target_length)
else:
assert model.args.max_transition_length != -1, "cuda dag loss does not support max_transition_length=-1. You can use a very large number such as 99999"
loss_result = dag_loss(match, links, output_length, target_length)
invalid_masks = loss_result.isinf().logical_or(loss_result.isnan())
loss_result.masked_fill_(invalid_masks, 0)
invalid_nsentences = invalid_masks.sum().detach()
loss = -(loss_result / target_length).mean()
nll_loss = loss.detach()
nsentences, ntokens = targets.shape[0], targets.ne(self.task.tgt_dict.pad()).sum()
loss_nofactor = loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss,
"factor": factor, "ntokens": ntokens, "nvalidtokens": nvalidtokens, "nsentences": nsentences,
"loss_nofactor": loss_nofactor, "invalid_nsentences": invalid_nsentences}
def _custom_loss(self, loss, name="loss", factor=1.0):
return {"name": name, "loss": loss, "factor": factor}
def set_update_num(self, update_num):
self.glat_p = get_anneal_value(self._glat_p_anneal_params, update_num)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
tgt_tokens = sample["target"]
if sample.get("update_num", None) is not None: # in training
self.set_update_num(sample['update_num'])
prev_output_tokens = sample['net_input']['prev_output_tokens']
if self.glat_p == 0:
glat = None
else:
glat = {
"context_p": max(self.glat_p, 0),
"require_glance_grad": False
}
def glat_function(model, word_ins_out, tgt_tokens, prev_output_tokens, net_input, glat, links=None):
batch_size, prelen, _ = links.shape
tarlen = tgt_tokens.shape[1]
nonpad_positions = ~tgt_tokens.eq(model.pad)
target_length = (nonpad_positions).sum(1)
output_length = prev_output_tokens.ne(model.pad).sum(1)
pred_tokens = word_ins_out.argmax(-1)
if self.cfg.torch_dag_logsoftmax_gather:
word_ins_out, match = torch_dag_logsoftmax_gather_inplace(word_ins_out, tgt_tokens.unsqueeze(1).expand(-1, prelen, -1))
else:
word_ins_out, match = dag_logsoftmax_gather_inplace(word_ins_out, tgt_tokens.unsqueeze(1).expand(-1, prelen, -1))
match = match.transpose(1, 2)
            force_emit = net_input['force_emit'] # force_emit is not None only if using --upsample_base predict
if force_emit is None:
force_emit = -torch.ones_like(prev_output_tokens, dtype=torch.long) # do not force alignments
if self.cfg.use_pretrain_loss:
# forcing alignment on fragment IDs by manipulating the tensor "match"
assert self.cfg.upsample_base == "predict", "pretrain only allowed when using upsample_base = predict"
match = self._force_aligment(match, force_emit)
glat_prev_mask = force_emit >= 0 # these output positions should be preserved during remasking
else:
glat_prev_mask = torch.zeros_like(prev_output_tokens, dtype=torch.bool)
if self.cfg.torch_dag_best_alignment:
if model.args.max_transition_length != -1:
links = model.restore_valid_links(links)
path = torch_dag_best_alignment(match, links, output_length, target_length)
else:
assert model.args.max_transition_length != -1, "cuda dag best alignment does not support max_transition_length=-1. You can use a very large number such as 99999"
path = dag_best_alignment(match, links, output_length, target_length) # batch * prelen
predict_assigned_mask = path >= 0
oracle = tgt_tokens.gather(-1, path.clip(min=0)) # bsz * prelen
same_num = ((pred_tokens == oracle) & predict_assigned_mask & ~glat_prev_mask).sum(1)
if self.glance_strategy is None:
keep_prob = ((target_length - glat_prev_mask.sum(dim=-1) - same_num) / target_length * glat['context_p']).unsqueeze(-1) * predict_assigned_mask.float()
elif self.glance_strategy == 'number-random': # original glat implementation
prob = torch.randn(oracle.shape, device=tgt_tokens.device, dtype=torch.float)
prob.masked_fill_(~predict_assigned_mask | glat_prev_mask, -100)
glance_nums = ((target_length - glat_prev_mask.sum(dim=-1) - same_num) * glat['context_p'] + 0.5).to(torch.long)
#prob_thresh = prob.topk(glance_nums.max().clip(min=1))[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh = prob.sort(descending=True)[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh.masked_fill_(glance_nums == 0, 100)
keep_prob = (prob >= prob_thresh.unsqueeze(-1)).to(prob.dtype)
elif self.glance_strategy == "cmlm":
prob = torch.randn(oracle.shape, device=tgt_tokens.device, dtype=torch.float)
prob.masked_fill_(~predict_assigned_mask | glat_prev_mask, -100)
glance_nums = ((target_length - glat_prev_mask.sum(dim=-1)) * torch.rand_like(target_length, dtype=torch.float) + 0.5).to(torch.long)
#prob_thresh = prob.topk(glance_nums.max().clip(min=1))[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh = prob.sort(descending=True)[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh.masked_fill_(glance_nums == 0, 100)
keep_prob = (prob >= prob_thresh.unsqueeze(-1)).to(prob.dtype)
elif self.glance_strategy == "fix":
prob = torch.randn(oracle.shape, device=tgt_tokens.device, dtype=torch.float)
prob.masked_fill_(~predict_assigned_mask | glat_prev_mask, -100)
glance_nums = ((target_length - glat_prev_mask.sum(dim=-1)) * glat['context_p'] + 0.5).to(torch.long)
#prob_thresh = prob.topk(glance_nums.max().clip(min=1))[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh = prob.sort(descending=True)[0].gather(-1, (glance_nums - 1).clip(min=0).unsqueeze(-1)).squeeze(-1)
prob_thresh.masked_fill_(glance_nums == 0, 100)
keep_prob = (prob >= prob_thresh.unsqueeze(-1)).to(prob.dtype)
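            # keep_prob gives, per output position, how likely the aligned (oracle) target token is revealed
            # in the glancing pass; positions that the best alignment leaves unassigned are never revealed.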
keep_word_mask = (torch.rand(prev_output_tokens.shape, device=prev_output_tokens.device) < keep_prob).bool() | glat_prev_mask.squeeze(1)
glat_prev_output_tokens = prev_output_tokens.masked_fill(keep_word_mask, 0) + oracle.masked_fill(~keep_word_mask, 0)
glat_tgt_tokens = tgt_tokens
next_force_emit = path.masked_fill(~keep_word_mask, -1)
glat_info = {
"glat_accu": (same_num.sum() / target_length.sum()).detach(),
"glat_context_p": glat['context_p'],
"glat_keep": keep_prob.mean().detach(),
"force_emit": next_force_emit,
"glat_prev_output_tokens": glat_prev_output_tokens,
}
return glat_prev_output_tokens, glat_tgt_tokens, glat_info
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens, sample['net_input'], glat, glat_function)
losses = []
# DAG loss
_losses = self._compute_dag_loss(
outputs["word_ins"].get("out"),
prev_output_tokens.ne(self.task.tgt_dict.pad()),
outputs["word_ins"].get("tgt"),
outputs["word_ins"].get("mask", None),
outputs["links"],
name="dag-loss",
factor=1,
force_emit=outputs.get('force_emit', None),
model=model
)
losses += [_losses]
dag_nll_loss = _losses.get("nll_loss", 0.0)
nsentences = _losses["nsentences"]
ntokens = _losses["ntokens"]
nvalidtokens = _losses["nvalidtokens"]
invalid_nsentences = _losses["invalid_nsentences"]
#length
_losses = self._compute_loss(
outputs["length"].get("out"),
outputs["length"].get("tgt"),
None,
0,
name="length-loss",
factor=outputs["length"]["factor"], )
losses += [_losses]
length_nll_loss = _losses.get("nll_loss", 0.0)
loss = sum(l["loss"] for l in losses)
sample_size = 1
logging_output = {
"loss": loss.data,
"dag_nll-loss": dag_nll_loss.data,
"length_nll-loss": length_nll_loss.data,
"ntokens": ntokens,
"nvalidtokens": nvalidtokens,
"nsentences": nsentences,
"invalid_nsentences": invalid_nsentences,
"sample_size": sample_size,
"glat_acc": outputs.get("glat_accu", 0),
"glat_keep": outputs.get("glat_keep", 0),
}
for l in losses:
logging_output[l["name"]] = (
utils.item(l["loss_nofactor"])
if reduce
else l["loss_nofactor"]
)
# gpu_tracker.track()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
) # each batch is 1
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) # token-level loss
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nvalidtokens = sum(log.get('nvalidtokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
invalid_nsentences = sum(log.get('invalid_nsentences', 0) for log in logging_outputs)
glat_acc = utils.item(sum(log.get("glat_acc", 0) for log in logging_outputs))
glat_keep = utils.item(sum(log.get("glat_keep", 0) for log in logging_outputs))
res = {
"ntokens": utils.item(ntokens),
"nsentences": utils.item(nsentences),
"nvalidtokens": utils.item(nvalidtokens),
"invalid_nsentences": utils.item(invalid_nsentences),
'tokens_perc': utils.item(nvalidtokens / ntokens),
'sentences_perc': 1 - utils.item(invalid_nsentences / nsentences),
}
res["loss"] = loss / sample_size
res["glat_acc"] = glat_acc / sample_size
res["glat_keep"] = glat_keep / sample_size
for key, value in res.items():
metrics.log_scalar(
key, value, sample_size, round=3
)
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = utils.item(sum(log.get(key, 0) for log in logging_outputs))
metrics.log_scalar(
key[:-5],
val / sample_size if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 20,968 | 47.204598 | 183 | py |
null | DA-Transformer-main/fs_plugins/criterions/utilities.py | ##########################################################################
# Copyright (C) 2022 COAI @ Tsinghua University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
def parse_anneal_argument(anneal_str):
def parse_value_pos(value_str):
if "@" in value_str:
value, pos = value_str.split("@")
else:
value = value_str
pos = "0"
return float(value), float(pos.replace("k", "000"))
res = []
for value_str in anneal_str.split(":"):
res.append(parse_value_pos(value_str))
return res
def get_anneal_value(anneal_params, update_num):
last_value, last_pos = anneal_params[0][0], 0
for value, pos in anneal_params:
if update_num < pos:
return last_value + (value - last_value) * (update_num - last_pos) / (pos - last_pos + 1)
last_value, last_pos = value, pos
return anneal_params[-1][0]
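# Illustrative example (assumed values): parse_anneal_argument("0.5:0.1@200k") returns [(0.5, 0.0), (0.1, 200000.0)];
# get_anneal_value then interpolates linearly between the anchors, e.g. it returns 0.5 at step 0,
# roughly 0.3 at step 100k, and stays at 0.1 after step 200k.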
| 1,484 | 38.078947 | 101 | py |
null | DA-Transformer-main/fs_plugins/custom_ops/__init__.py | from .dag_loss import dag_loss, dag_best_alignment, dag_logsoftmax_gather_inplace, torch_dag_loss, torch_dag_best_alignment, torch_dag_logsoftmax_gather_inplace | 160 | 160 | 160 | py |
null | DA-Transformer-main/fs_plugins/custom_ops/dag_loss.cpp | // ##########################################################################
// Copyright (C) 2022 COAI @ Tsinghua University
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ###########################################################################
#include <torch/extension.h>
std::tuple<torch::Tensor, torch::Tensor> dag_loss(const torch::Tensor &match_all, const torch::Tensor &links, const torch::Tensor &output_length, const torch::Tensor &target_length, bool require_gradient, int config);
std::tuple<torch::Tensor, torch::Tensor> dag_loss_backward(const torch::Tensor &grad_output, const torch::Tensor &alpha, const torch::Tensor &beta, const torch::Tensor &match_all, const torch::Tensor &links, const torch::Tensor &output_length, const torch::Tensor &target_length, int config1, int config2);
std::tuple<torch::Tensor, torch::Tensor> dag_best_alignment(const torch::Tensor &match_all, const torch::Tensor &links, const torch::Tensor &output_length, const torch::Tensor &target_length, int config);
torch::Tensor logsoftmax_gather(torch::Tensor word_ins_out, const torch::Tensor &select_idx, bool require_gradient);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("dag_loss", &dag_loss, "DAG Loss");
m.def("dag_loss_backward", &dag_loss_backward, "DAG Loss Backward");
m.def("dag_best_alignment", &dag_best_alignment, "DAG Best Alignment");
m.def("logsoftmax_gather", &logsoftmax_gather, "logsoftmax + gather");
}
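// These bindings are JIT-compiled and loaded from fs_plugins/custom_ops/dag_loss.py via torch.utils.cpp_extension.load.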
| 1,948 | 63.966667 | 306 | cpp |
null | DA-Transformer-main/fs_plugins/custom_ops/dag_loss.py | ##########################################################################
# Copyright (C) 2022 COAI @ Tsinghua University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import os
import math
import sys
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
from torch.utils.checkpoint import checkpoint
from torch import jit
from typing import Any, Dict, List, Optional, Tuple
####################### CUDA Version of DAG Operations ####################
module_path = os.path.dirname(__file__)
dag_kernel = None
def get_dag_kernel():
global dag_kernel
if not torch.cuda.is_available():
raise RuntimeError("You need GPU to use the custom cuda operations")
if dag_kernel is not None:
return dag_kernel
else:
print("Start compiling cuda operations for DA-Transformer...(It usually takes a few minutes for the first time running.)", file=sys.stderr, flush=True)
if int(torch.version.cuda.split(".")[0]) < 11:
extra_include_paths = [os.path.join(module_path, "../../cub")]
else:
extra_include_paths = None
dag_kernel = load(
"dag_loss_fn",
sources=[
os.path.join(module_path, "dag_loss.cpp"),
os.path.join(module_path, "dag_loss.cu"),
os.path.join(module_path, "dag_best_alignment.cu"),
os.path.join(module_path, "logsoftmax_gather.cu"),
],
extra_cflags=['-DOF_SOFTMAX_USE_FAST_MATH', '-O3'],
extra_cuda_cflags=['-DOF_SOFTMAX_USE_FAST_MATH', '-O3'],
extra_include_paths=extra_include_paths,
)
print("Cuda operations compiled", file=sys.stderr, flush=True)
return dag_kernel
class DagLossFunc(Function):
config = 1
config1 = 2
config2 = 2
@staticmethod
def forward(
ctx,
match_all, # bsz * tarlen * prelen
links, # bsz * prelen * translen
output_length, # bsz
target_length, # bsz
):
r"""
Function to calculate the dag loss.
Input:
match_all (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_target_length, max_output_length]
                match_all[b, i, j] represents log P(y_i | v_j), the log probability of predicting the i-th token in the reference
                based on the j-th vertex.
                (Note: float32 is preferred; float16 may cause precision problems)
links (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, max_transition_length]
links[b, i, j] represents the transition probability from the i-th vertex to **the (i+j)-th vertex**.
(Note: this parameter is different from the torch version)
output_length (torch.LongTensor):
Shape: [batch_size]
output_length should be the graph size, the vertices (index >= graph size) are ignored
target_length (torch.LongTensor):
Shape: [batch_size]
target_length is the reference length, the tokens (index >= target length) are ignored
Output (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size]
the loss of each sample
"""
require_gradient = ctx.needs_input_grad[0] or ctx.needs_input_grad[1]
match_all = match_all.contiguous()
links = links.contiguous()
alpha, beta = get_dag_kernel().dag_loss(match_all, links, output_length, target_length, require_gradient, DagLossFunc.config) # bsz * prelen * tarlen
if require_gradient:
res = beta[:, 0, 0].clone()
else:
res = alpha[range(alpha.shape[0]), target_length - 1, output_length - 1]
ctx.save_for_backward(alpha, beta, match_all, links, output_length, target_length)
return res
@staticmethod
def backward(ctx, grad_output):
alpha, beta, match_all, links, output_length, target_length = ctx.saved_tensors
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
grad_match_all, grad_links = get_dag_kernel().dag_loss_backward(grad_output, alpha, beta, match_all, links, output_length, target_length, DagLossFunc.config1, DagLossFunc.config2)
return grad_match_all, grad_links, None, None
else:
return None, None, None, None
dag_loss = DagLossFunc.apply
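# Minimal usage sketch (assumed shapes; requires a CUDA device and the compiled kernels):
#   match_all: [bsz, tarlen, prelen] token log-probs, links: [bsz, prelen, translen] transition log-probs
#   logprob = dag_loss(match_all, links, output_length, target_length)   # [bsz]
#   loss = -(logprob / target_length).mean()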
class DagBestAlignmentFunc(Function):
config = 1
@staticmethod
def forward(
ctx,
match_all, # bsz * tarlen * prelen
links, # bsz * prelen * translen
output_length, # bsz
target_length, # bsz
):
r"""
Function to obtain the alignment between prediction and reference
Input:
match_all (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_target_length, max_output_length]
                match_all[b, i, j] represents log P(y_i | v_j), the log probability of predicting the i-th token in the reference
                based on the j-th vertex.
                (Note: float32 is preferred; float16 may cause precision problems)
links (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, max_transition_length]
links[b, i, j] represents the transition probability from the i-th vertex to **the (i+j)-th vertex**.
(Note: this parameter is different from the torch version)
output_length (torch.LongTensor):
Shape: [batch_size]
output_length should be the graph size, the vertices (index >= graph size) are ignored
target_length (torch.LongTensor):
Shape: [batch_size]
target_length is the reference length, the tokens (index >= target length) are ignored
Output (torch.LongTensor):
Shape: [batch_size, max_output_length]
if output[b, i]>=0, it represents the index of target token aligned with the i-th vertex
otherwise, output[b, i] = -1, it represents the i-th vertex is not aligned with any target token
"""
match_all = match_all.contiguous()
links = links.contiguous()
alpha, path = get_dag_kernel().dag_best_alignment(match_all, links, output_length, target_length, DagBestAlignmentFunc.config) # bsz * prelen * tarlen
path = path.to(torch.long)
ctx.mark_non_differentiable(path)
return path
@staticmethod
def backward(ctx, grad_output):
assert False, "no backward function for best alignment"
dag_best_alignment = DagBestAlignmentFunc.apply
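# Minimal usage sketch (assumed shapes; requires a CUDA device and the compiled kernels):
#   path = dag_best_alignment(match_all, links, output_length, target_length)   # [bsz, prelen]
#   path[b, j] >= 0 gives the target index assigned to vertex j on the best path; -1 means unassigned.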
class DagLogsoftmaxGatherFunc(Function):
@staticmethod
def forward(
ctx,
word_ins_out, # bsz * prelen * vocabsize
select_idx # bsz * prelen * slen
):
r"""
        This function is equivalent to the code below:
res = word_ins_out.log_softmax(dim=-1, dtype=torch.float).gather(-1, select_idx)
Note: to reduce memory usage, word_ins_out is modified in place for storing backward tensors.
DO NOT use word_ins_out after this function.
If you do not like the side effect, please use the torch version instead
Input:
word_ins_out (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, vocab_size]
the unnormalized logits
select_idx (torch.LongTensor):
Shape: [batch_size, max_output_length, select_id_size]
index in gather function
Output:
modified_word_ins_out (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, vocab_size]
modified word_ins_out, do not use it
selected_result (torch.FloatTensor):
Shape: [batch_size, max_output_length, select_id_size]
"""
require_gradient = ctx.needs_input_grad[0]
selected_result = get_dag_kernel().logsoftmax_gather(word_ins_out, select_idx, require_gradient)
# Note: the cuda kernel will modify word_ins_out and then reuse it in backward
ctx.mark_dirty(word_ins_out)
ctx.set_materialize_grads(False)
if require_gradient:
ctx.save_for_backward(word_ins_out, select_idx)
ctx.has_backward = False
return word_ins_out, selected_result # bsz * prelen * slen
@staticmethod
def backward(ctx, grad_word_ins_out, grad_output):
if not ctx.needs_input_grad[0]:
return None, None
assert grad_word_ins_out is None, "Cannot reuse word_ins_out after logsoftmax_gather"
if grad_output is None:
return None, None
assert not ctx.has_backward, "Cannot backward twice in logsoftmax_gather"
ctx.has_backward = True
grad_input, selected_idx = ctx.saved_tensors
grad_input.mul_(grad_output.sum(-1, keepdim=True).neg_().to(grad_input.dtype))
grad_input.scatter_add_(-1, selected_idx, grad_output.to(grad_input.dtype))
return grad_input, None
dag_logsoftmax_gather_inplace = DagLogsoftmaxGatherFunc.apply
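# Minimal usage sketch (note the in-place side effect: do not reuse word_ins_out afterwards):
#   word_ins_out, match = dag_logsoftmax_gather_inplace(word_ins_out, tgt_tokens.unsqueeze(1).expand(-1, prelen, -1))
#   match[b, j, i] then holds log P(y_i | v_j) for the assumed shapes [bsz, prelen, tarlen].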
####################### Torch Version of DAG Operations ####################
@jit.script
def logsumexp_keepdim(x: Tensor, dim: int) -> Tensor:
    # Avoid the NaN issue when x contains -inf
# See https://github.com/pytorch/pytorch/issues/31829
m, _ = x.max(dim=dim, keepdim=True)
mask = m == -float('inf')
m = m.detach()
s = (x - m.masked_fill_(mask, 0)).exp_().sum(dim=dim, keepdim=True)
return s.masked_fill_(mask, 1).log_() + m.masked_fill_(mask, -float('inf'))
@jit.script
def loop_function_noempty(last_f: Tensor, links: Tensor, match: Tensor) -> Tensor:
f_next = logsumexp_keepdim(last_f + links, 1) # batch * 1 * prelen
f_next = f_next.transpose(1, 2) + match # batch * prelen * 1
return f_next
@jit.script
def loop_function_noempty_max(last_f: Tensor, links: Tensor, match: Tensor) -> Tensor:
f_next = torch.max(last_f + links, dim=1)[0] # batch * 1 * prelen
f_next = f_next.unsqueeze(-1) + match # batch * prelen * 1
return f_next
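# Both helpers implement one step of the DAG dynamic program over target positions:
#   f_i[v] = match[v, i] + (logsumexp / max) over u of (f_{i-1}[u] + links[u, v]),
# i.e. the score of emitting the first i target tokens and ending at vertex v; the logsumexp version
# is used for the loss (torch_dag_loss) and the max version for the best alignment (__torch_max_loss).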
def torch_dag_loss(match_all, links, output_length, target_length):
r"""
Function to calculate the dag loss.
Input:
match_all (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_target_length, max_output_length]
            match_all[b, i, j] represents log P(y_i | v_j), the log probability of predicting the i-th token in the reference
            based on the j-th vertex.
            (Note: float32 is preferred; float16 may cause precision problems)
links (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, max_transition_length]
links[b, i, j] represents the transition probability from the i-th vertex to **the j-th vertex**.
(Note: this parameter is different from the cuda version)
output_length (torch.LongTensor):
Shape: [batch_size]
output_length should be the graph size, the vertices (index >= graph size) are ignored
target_length (torch.LongTensor):
Shape: [batch_size]
target_length is the reference length, the tokens (index >= target length) are ignored
Output (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size]
the loss of each sample
"""
match_all = match_all.transpose(1, 2)
batch_size, prelen, tarlen = match_all.shape
assert links.shape[1] == links.shape[2], "links should be batch_size * prelen * prelen"
f_arr = []
f_init = torch.zeros(batch_size, prelen, 1, dtype=match_all.dtype, device=match_all.device).fill_(float("-inf"))
f_init[:, 0, 0] = match_all[:, 0, 0]
f_arr.append(f_init)
match_all_chunk = torch.chunk(match_all, tarlen, -1) # k * [batch * prelen * 1]
for k in range(1, tarlen):
f_now = loop_function_noempty(f_arr[-1], links, match_all_chunk[k])
f_arr.append(f_now)
loss_result = torch.cat(f_arr, -1)[range(batch_size), output_length - 1, target_length - 1]
return loss_result
def __torch_max_loss(match_all, links, output_length, target_length):
match_all = match_all.transpose(1, 2)
batch_size, prelen, tarlen = match_all.shape
assert links.shape[1] == links.shape[2], "links should be batch_size * prelen * prelen"
f_arr = []
f_init = torch.zeros(batch_size, prelen, 1, dtype=match_all.dtype, device=match_all.device).fill_(float("-inf"))
f_init[:, 0, 0] = match_all[:, 0, 0]
f_arr.append(f_init)
match_arr = torch.chunk(match_all, tarlen, -1)
for i in range(1, tarlen):
f_now = loop_function_noempty_max(f_arr[-1], links, match_arr[i])
f_arr.append(f_now)
alllogprob = torch.cat(f_arr, -1)[range(batch_size), output_length - 1, target_length - 1]
return alllogprob
def torch_dag_best_alignment(match_all, links, output_length, target_length):
r"""
Function to obtain the alignment between prediction and reference
Input:
match_all (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_target_length, max_output_length]
            match_all[b, i, j] represents log P(y_i | v_j), the log probability of predicting the i-th token in the reference
            based on the j-th vertex.
            (Note: float32 is preferred; float16 may cause precision problems)
links (torch.FloatTensor or torch.HalfTensor):
Shape: [batch_size, max_output_length, max_transition_length]
links[b, i, j] represents the transition probability from the i-th vertex to **the j-th vertex**.
(Note: this parameter is different from the cuda version)
output_length (torch.LongTensor):
Shape: [batch_size]
output_length should be the graph size, the vertices (index >= graph size) are ignored
target_length (torch.LongTensor):
Shape: [batch_size]
target_length is the reference length, the tokens (index >= target length) are ignored
Output (torch.LongTensor):
Shape: [batch_size, max_output_length]
if output[b, i]>=0, it represents the index of target token aligned with the i-th vertex
otherwise, output[b, i] = -1, it represents the i-th vertex is not aligned with any target token
"""
with torch.enable_grad():
match_all.requires_grad_()
alllogprob = __torch_max_loss(match_all, links, output_length, target_length)
matchgrad = torch.autograd.grad(alllogprob.sum(), [match_all])[0] # batch * talen * prelen
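        # Because __torch_max_loss uses max instead of logsumexp, the gradient w.r.t. match_all is exactly 1
        # at the (target, vertex) cells on the best path and 0 elsewhere, so argmax over the target axis
        # recovers the alignment.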
pathvalue, path = matchgrad.max(dim=1)
path.masked_fill_(pathvalue < 0.5, -1)
return path
def torch_dag_logsoftmax_gather_inplace(word_ins_out, select_idx):
r""" Fused operation of log_softmax and gather"""
logits = torch.log_softmax(word_ins_out, -1, dtype=torch.float32)
match = logits.gather(dim=-1, index=select_idx)
return word_ins_out, match
####################### For Config Tuning ######################
# The code below is only used for testing
################################################################
if __name__ == "__main__":
import numpy as np
import random
from collections import defaultdict
from itertools import product
import tqdm
def restore_valid_links(links):
# batch * prelen * trans_len
batch_size, prelen, translen = links.shape
valid_links_idx = torch.arange(prelen, dtype=torch.long, device=links.device).unsqueeze(1) + \
torch.arange(translen, dtype=torch.long, device=links.device).unsqueeze(0) + 1
invalid_idx_mask = valid_links_idx >= prelen
valid_links_idx.masked_fill_(invalid_idx_mask, prelen)
res = torch.zeros(batch_size, prelen, prelen + 1, dtype=torch.float, device=links.device).fill_(float("-inf"))
res.scatter_(2, valid_links_idx.unsqueeze(0).expand(batch_size, -1, -1), links)
return res[:, :, :prelen]
def random_check_loss(bsz, prelen, tarlen, translen, config=1, config1=1, config2=1):
# print(bsz, prelen, tarlen, translen)
DagLossFunc.config = config
DagLossFunc.config1 = config1
DagLossFunc.config2 = config2
match_all = torch.rand(bsz, tarlen, prelen).cuda().requires_grad_()
links = torch.rand(bsz, prelen, translen).cuda().log_softmax(dim=-1).requires_grad_()
# easy case
output_length = torch.ones(bsz, dtype=torch.long).cuda() * prelen
target_length = torch.ones(bsz, dtype=torch.long).cuda() * tarlen
output_length -= torch.randint(0, min(5, prelen), output_length.shape, device=output_length.device)
target_length -= torch.randint(0, min(5, tarlen), target_length.shape, device=target_length.device)
import time
torch.cuda.synchronize()
start = time.time()
res = dag_loss(match_all, links, output_length, target_length)
torch.cuda.synchronize()
atime = time.time() - start
# print("cuda :", atime)
start = time.time()
res2 = torch_dag_loss(match_all, restore_valid_links(links), output_length, target_length)
torch.cuda.synchronize()
btime = time.time() - start
# print("torch:", btime)
assert torch.allclose(res, res2, rtol=1e-03, atol=1e-04)
# return atime, btime
start = time.time()
gA, gB = torch.autograd.grad(res.mean(), [match_all, links], retain_graph=True)
torch.cuda.synchronize()
ctime = time.time() - start
# print("cuda grad:", ctime)
start = time.time()
rA, rB = torch.autograd.grad(res2.mean(), [match_all, links], retain_graph=True)
dtime = time.time() - start
# print("torch grad:", dtime)
assert torch.allclose(gA, rA)
assert torch.allclose(gB, rB)
return atime, btime, ctime, dtime
@torch.no_grad()
def torch_check_best_alignemnt(alpha, path, match_all, links, output_length, target_length):
batch_size, tarlen, prelen = match_all.shape
res = alpha[range(batch_size), target_length - 1, output_length - 1]
pos = torch.zeros(batch_size, device="cuda", dtype=torch.long)
tid = torch.zeros(batch_size, device="cuda", dtype=torch.long)
nowres = match_all[range(batch_size), tid, pos]
for i in range(1, prelen):
tid += (path[:, i] >= 0).int()
nextpos = (torch.ones_like(pos) * i).masked_fill(path[:, i] < 0, 0) + pos.masked_fill(path[:, i] >= 0, 0)
nowres += (links[range(batch_size), pos, (-pos + i - 1).clip(min=0)] + match_all[range(batch_size), tid, nextpos]) * (path[:, i] >= 0).float()
pos = nextpos
return torch.allclose(res, nowres)
def random_check_align(bsz, prelen, tarlen, translen, config=1):
# print(bsz, prelen, tarlen, translen)
DagBestAlignmentFunc.config = config
match_all = torch.rand(bsz, tarlen, prelen).cuda().requires_grad_()
links = torch.rand(bsz, prelen, translen).cuda().log_softmax(dim=-1).requires_grad_()
# easy case
output_length = torch.ones(bsz, dtype=torch.long).cuda() * prelen
target_length = torch.ones(bsz, dtype=torch.long).cuda() * tarlen
output_length -= torch.randint(0, min(5, prelen), output_length.shape, device=output_length.device)
target_length -= torch.randint(0, min(5, tarlen), target_length.shape, device=target_length.device)
import time
torch.cuda.synchronize()
start = time.time()
alpha, path = get_dag_kernel().dag_best_alignment(match_all, links, output_length, target_length, DagBestAlignmentFunc.config)
res = alpha[range(bsz), target_length - 1, output_length - 1]
torch.cuda.synchronize()
atime = time.time() - start
# print("cuda :", atime)
start = time.time()
path2 = torch_dag_best_alignment(match_all, restore_valid_links(links), output_length, target_length)
torch.cuda.synchronize()
btime = time.time() - start
# print("torch:", btime)
res2 = __torch_max_loss(match_all, restore_valid_links(links), output_length, target_length)
assert torch.allclose(res, res2, rtol=1e-03, atol=1e-04)
assert torch_check_best_alignemnt(alpha, path, match_all, links, output_length, target_length)
assert torch_check_best_alignemnt(alpha, path2, match_all, links, output_length, target_length)
return atime, btime
def random_check_gather(bsz, prelen, tarlen, vocabsize):
word_ins_out = torch.rand(bsz, prelen, vocabsize, dtype=torch.float16, device="cuda").requires_grad_()
select_idx = torch.randint(0, vocabsize - 1, (bsz, prelen, tarlen), device="cuda")
import time
torch.cuda.synchronize()
start = time.time()
_, res = dag_logsoftmax_gather_inplace(word_ins_out.clone(), select_idx)
ga = torch.autograd.grad(res.sum() / res.shape[2], [word_ins_out], retain_graph=True)[0]
torch.cuda.synchronize()
atime = time.time() - start
# print("cuda :", atime)
start = time.time()
_, res2 = torch_dag_logsoftmax_gather_inplace(word_ins_out, select_idx)
ra = torch.autograd.grad(res2.sum() / res.shape[2], [word_ins_out], retain_graph=True)[0]
torch.cuda.synchronize()
btime = time.time() - start
# print("torch:", btime)
assert torch.allclose(res, res2, rtol=1e-3, atol=1e-4)
assert torch.allclose(ga, ra, rtol=1e-3, atol=1e-4)
return atime, btime
def tune_config(skip_forward=False, skip_backward=False, skip_align=False, skip_gather=False):
config_list = [1,2,3,4]
config1_list = [1,2]
config2_list = [1,2,3]
configalign_list = [1,2,3,4]
forward_best = DagLossFunc.config
backward_best = (DagLossFunc.config1, DagLossFunc.config2)
align_best = DagBestAlignmentFunc.config
if not skip_forward:
print("########### Forward Tuning #############")
a_res, b_res = defaultdict(list), defaultdict(list)
for i in tqdm.tqdm(range(100)):
for config in config_list:
SEED = i
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
tarlen = random.randint(40, 60)
bsz = 4096 // tarlen
factor = 8
# print(f"run {i}")
a, b, c, d = random_check_loss(bsz, tarlen * factor, tarlen, factor * 4, config=config)
# a, b = random_check(1, 8, 4, 4)
if i > 0:
a_res[config].append(a)
b_res[config].append(b)
forward_res = []
for config in config_list:
forward_res.append(np.mean(b_res[config]) / np.mean(a_res[config]))
print(f"{config}: {np.mean(a_res[config]):.6f} {np.mean(b_res[config]):.6f} {forward_res[-1]:.2f}")
forward_best = config_list[np.argmax(forward_res)]
print(f"Best Choice: {forward_best}")
if not skip_backward:
print("########### Backward Tuning #############")
c_res, d_res = defaultdict(list), defaultdict(list)
for i in tqdm.tqdm(range(50)):
for config1, config2 in product(config1_list, config2_list):
SEED = i
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
tarlen = random.randint(40, 60)
bsz = 4096 // tarlen
factor = 8
a, b, c, d = random_check_loss(bsz, tarlen * factor, tarlen, factor * 4, config=forward_best, config1=config1, config2=config2)
# a, b = random_check(1, 8, 4, 4)
if i > 0:
c_res[(config1, config2)].append(c)
d_res[(config1, config2)].append(d)
backward_res = []
for config1, config2 in product(config1_list, config2_list):
backward_res.append(np.mean(d_res[(config1, config2)]) / np.mean(c_res[(config1, config2)]))
print(f"{config1, config2}: {np.mean(c_res[(config1, config2)]):.6f} {np.mean(d_res[(config1, config2)]):.6f} {backward_res[-1]:.2f}")
backward_best = list(product(config1_list, config2_list))[np.argmax(backward_res)]
print(f"Best Choice: {backward_best}")
if not skip_align:
print("########### Align Tuning #############")
a_res, b_res = defaultdict(list), defaultdict(list)
for i in tqdm.tqdm(range(30)):
for config in configalign_list:
SEED = i
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
tarlen = random.randint(40, 60)
bsz = 4096 // tarlen
factor = 8
# print(f"run {i}")
a, b = random_check_align(bsz, tarlen * factor, tarlen, factor * 4, config=config)
# a, b = random_check(1, 8, 4, 4)
if i > 0:
a_res[config].append(a)
b_res[config].append(b)
align_res = []
for config in configalign_list:
align_res.append(np.mean(b_res[config]) / np.mean(a_res[config]))
print(f"{config}: {np.mean(a_res[config]):.6f} {np.mean(b_res[config]):.6f} {align_res[-1]:.2f}")
align_best = configalign_list[np.argmax(align_res)]
print(f"Best Choice: {align_best}")
if not skip_gather:
print("########### Test Gather #############")
a_res, b_res = defaultdict(list), defaultdict(list)
for i in tqdm.tqdm(range(100)):
SEED = i
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
tarlen = random.randint(40, 60)
bsz = 4096 // tarlen
factor = 8
vocabsize = random.randint(12345, 23456)
a, b = random_check_gather(bsz, tarlen * factor, tarlen, vocabsize)
if i > 0:
a_res[0].append(a)
b_res[0].append(b)
gather_res = np.mean(b_res[0]) / np.mean(a_res[0])
print(f"{np.mean(a_res[0]):.6f} {np.mean(b_res[0]):.6f} {gather_res:.2f}")
DagLossFunc.config = forward_best
DagLossFunc.config1 = backward_best[0]
DagLossFunc.config2 = backward_best[1]
DagBestAlignmentFunc.config = align_best
tune_config() | 27,898 | 42.72884 | 191 | py |
null | DA-Transformer-main/fs_plugins/custom_ops/utilities.h | #define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 70000
#define if_constexpr(expression) if constexpr (expression)
#else
#define if_constexpr(expression) if(expression)
#endif
| 276 | 26.7 | 58 | h |
null | DA-Transformer-main/fs_plugins/models/__init__.py | import os
import importlib
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fs_plugins.models." + file_name)
| 317 | 34.333333 | 68 | py |