| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q26300
|
activation
|
train
|
def activation(data: mx.sym.Symbol, act_type: str) -> mx.sym.Symbol:
"""
Apply custom or standard activation.
Custom activation types include:
- Swish-1, also called Sigmoid-Weighted Linear Unit (SiLU): Ramachandran et
al. (https://arxiv.org/pdf/1710.05941.pdf), Elfwing et al.
(https://arxiv.org/pdf/1702.03118.pdf)
- Gaussian Error Linear Unit (GELU): Hendrycks and Gimpel
(https://arxiv.org/pdf/1606.08415.pdf)
:param data: input Symbol of any shape.
:param act_type: Type of activation.
:return: output Symbol with same shape as input.
"""
# TODO: Contribute these to MXNet? For now it appears that registered activation types must be implemented in C++.
if act_type == C.SWISH1:
return data * mx.sym.Activation(data, act_type="sigmoid")
elif act_type == C.GELU:
# Approximation of x * gaussian_cdf(x) used by Hendrycks and Gimpel
return 0.5 * data * (1 + mx.sym.Activation((math.sqrt(2 / math.pi) * (data + (0.044715 * (data**3)))),
act_type="tanh"))
else:
return mx.sym.Activation(data, act_type=act_type)
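# Illustrative usage (added sketch, not part of the original module; assumes the
# module-level imports `mx`, `math` and the constants module `C`, with C.SWISH1 and
# C.GELU being the string identifiers checked above):
#
#   x = mx.sym.Variable("x")
#   swish = activation(x, act_type=C.SWISH1)   # x * sigmoid(x)
#   gelu = activation(x, act_type=C.GELU)      # tanh approximation of x * gaussian_cdf(x)
#   relu = activation(x, act_type="relu")      # falls through to mx.sym.Activation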
|
python
|
{
"resource": ""
}
|
q26301
|
split_heads
|
train
|
def split_heads(x: mx.sym.Symbol, depth_per_head: int, heads: int) -> mx.sym.Symbol:
"""
Returns a symbol with head dimension folded into batch and depth divided by the number of heads.
:param x: Symbol of shape (batch, length, depth).
:param depth_per_head: Depth per head.
:param heads: Number of heads.
:return: Symbol of shape (batch * heads, length, depth_per_head).
"""
# (batch, length, heads, depth_per_head)
x = mx.sym.reshape(data=x, shape=(0, -1, heads, depth_per_head))
# (batch, heads, length, depth/heads)
x = mx.sym.transpose(data=x, axes=(0, 2, 1, 3))
# (batch * heads, length, depth/heads)
return mx.sym.reshape(data=x, shape=(-3, -1, depth_per_head))
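# Shape walk-through (added for illustration, not in the original source): for a
# hypothetical input of shape (batch=2, length=5, depth=8) with heads=4 and
# depth_per_head=2, the reshape/transpose/reshape sequence produces
# (2, 5, 4, 2) -> (2, 4, 5, 2) -> (8, 5, 2), i.e. (batch * heads, length, depth_per_head).
#
#   x = mx.sym.Variable("x")                        # assume shape (2, 5, 8)
#   y = split_heads(x, depth_per_head=2, heads=4)   # shape (8, 5, 2)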
|
python
|
{
"resource": ""
}
|
q26302
|
dot_attention
|
train
|
def dot_attention(queries: mx.sym.Symbol,
keys: mx.sym.Symbol,
values: mx.sym.Symbol,
lengths: Optional[mx.sym.Symbol] = None,
dropout: float = 0.0,
bias: Optional[mx.sym.Symbol] = None,
prefix: Optional[str] = ''):
"""
Computes dot attention for a set of queries, keys, and values.
:param queries: Attention queries. Shape: (n, lq, d).
:param keys: Attention keys. Shape: (n, lk, d).
:param values: Attention values. Shape: (n, lk, dv).
:param lengths: Optional sequence lengths of the keys. Shape: (n,).
:param dropout: Dropout probability.
:param bias: Optional 3d bias tensor.
:param prefix: Optional prefix prepended to operator names.
:return: 'Context' vectors for each query. Shape: (n, lq, dv).
"""
utils.check_condition(lengths is not None or bias is not None,
"Must provide either length or bias argument for masking")
# (n, lq, lk)
logits = mx.sym.batch_dot(lhs=queries, rhs=keys, transpose_b=True, name='%sdot' % prefix)
if lengths is not None:
# mask lk dimension
# (lk, n, lq)
logits = mx.sym.transpose(data=logits, axes=(2, 0, 1))
logits = mx.sym.SequenceMask(data=logits,
use_sequence_length=True,
sequence_length=lengths,
value=C.LARGE_NEGATIVE_VALUE)
# (n, lq, lk)
logits = mx.sym.transpose(data=logits, axes=(1, 2, 0))
if bias is not None:
logits = mx.sym.broadcast_add(logits, bias, name='%sbias_add' % prefix)
probs = mx.sym.softmax(logits, axis=-1)
probs = mx.sym.Dropout(probs, p=dropout) if dropout > 0.0 else probs
# (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
return mx.sym.batch_dot(lhs=probs, rhs=values, name='%scontexts' % prefix)
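# Minimal symbolic sketch (added; assumes the surrounding module imports `mx`, `C`
# and `utils`): masking can be driven either by key lengths or by a broadcastable bias.
#
#   q = mx.sym.Variable("q")        # (n, lq, d)
#   k = mx.sym.Variable("k")        # (n, lk, d)
#   v = mx.sym.Variable("v")        # (n, lk, dv)
#   lens = mx.sym.Variable("lens")  # (n,)
#   ctx = dot_attention(q, k, v, lengths=lens, dropout=0.1, prefix="att_")  # (n, lq, dv)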
|
python
|
{
"resource": ""
}
|
q26303
|
LengthRatio.average_sources
|
train
|
def average_sources(source_encoded: mx.sym.Symbol, source_encoded_length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Calculate the average of encoded sources taking into account their lengths.
:param source_encoded: Encoder representation for n elements. Shape: (n, source_encoded_length, hidden_size).
:param source_encoded_length: A vector of encoded sequence lengths. Shape: (n,).
:return: Average vectors. Shape: (n, hidden_size).
"""
# source_masked: (n, source_encoded_length, hidden_size)
source_masked = mx.sym.SequenceMask(data=source_encoded,
axis=1,
sequence_length=source_encoded_length,
use_sequence_length=True,
value=0.)
# calculate the proper means of encoded sources
averaged = mx.sym.broadcast_div(mx.sym.sum(source_masked, axis=1, keepdims=False),
mx.sym.reshape(source_encoded_length, shape=(-1, 1)))
return averaged
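# Worked example (added, not in the original source): for a batch of 2 encoded
# sequences with hidden_size=3 and lengths [2, 1], positions beyond each length are
# zeroed by SequenceMask, so the sum over the time axis divided by the length gives
# the true per-sequence mean.
#
#   enc = mx.sym.Variable("enc")       # (2, seq_len, 3)
#   lens = mx.sym.Variable("lens")     # (2,)
#   avg = average_sources(enc, lens)   # (2, 3)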
|
python
|
{
"resource": ""
}
|
q26304
|
MultiHeadAttentionBase._attend
|
train
|
def _attend(self,
queries: mx.sym.Symbol,
keys: mx.sym.Symbol,
values: mx.sym.Symbol,
lengths: Optional[mx.sym.Symbol] = None,
bias: Optional[mx.sym.Symbol] = None) -> mx.sym.Symbol:
"""
Returns context vectors of multi-head dot attention.
:param queries: Query tensor. Shape: (batch_size, query_max_length, depth).
:param keys: Keys. Shape: (batch_size, memory_max_length, depth).
:param values: Values. Shape: (batch_size, memory_max_length, depth).
:param lengths: Optional lengths of keys. Shape: (batch_size,).
:param bias: Optional 3d bias.
:return: Context vectors. Shape: (batch_size, query_max_length, output_depth).
"""
# scale by sqrt(depth_per_head)
queries = queries * (self.depth_per_head ** -0.5)
# (batch*heads, length, depth/heads)
queries = split_heads(queries, self.depth_per_head, self.heads)
keys = split_heads(keys, self.depth_per_head, self.heads)
values = split_heads(values, self.depth_per_head, self.heads)
lengths = broadcast_to_heads(lengths, self.heads, ndim=1, fold_heads=True) if lengths is not None else lengths
# (batch*heads, query_max_length, depth_per_head)
contexts = dot_attention(queries, keys, values,
lengths=lengths, dropout=self.dropout, bias=bias, prefix=self.prefix)
# (batch, query_max_length, depth)
contexts = combine_heads(contexts, self.depth_per_head, self.heads)
# contexts: (batch, query_max_length, output_depth)
contexts = mx.sym.FullyConnected(data=contexts,
weight=self.w_h2o,
no_bias=True,
num_hidden=self.depth_out,
flatten=False)
return contexts
|
python
|
{
"resource": ""
}
|
q26305
|
rerank
|
train
|
def rerank(args: argparse.Namespace):
"""
Reranks a list of hypotheses according to a sentence-level metric.
Writes all output to STDOUT.
:param args: Namespace object holding CLI arguments.
"""
reranker = Reranker(args.metric, args.return_score)
with utils.smart_open(args.reference) as reference, utils.smart_open(args.hypotheses) as hypotheses:
for i, (reference_line, hypothesis_line) in enumerate(zip(reference, hypotheses), 1):
reference = reference_line.strip()
# Expects a JSON object with keys containing at least 'translations',
# as returned by sockeye.translate's nbest output
hypotheses = json.loads(hypothesis_line.strip())
utils.check_condition('translations' in hypotheses,
"Reranking requires nbest JSON input with 'translations' key present.")
num_hypotheses = len(hypotheses['translations'])
if not num_hypotheses > 1:
logger.info("Line %d contains %d hypotheses. Nothing to rerank.", i, num_hypotheses)
reranked_hypotheses = hypotheses
else:
reranked_hypotheses = reranker.rerank(hypotheses, reference)
if args.output_best:
if not num_hypotheses:
print()
else:
print(reranked_hypotheses['translations'][0])
else:
print(json.dumps(reranked_hypotheses, sort_keys=True))
|
python
|
{
"resource": ""
}
|
q26306
|
main
|
train
|
def main():
"""
Commandline interface to rerank nbest lists.
"""
log.setup_main_logger(console=True, file_logging=False)
log.log_sockeye_version(logger)
params = argparse.ArgumentParser(description="Rerank nbest lists of translations."
" Reranking sorts a list of hypotheses according"
" to their score compared to a common reference.")
arguments.add_rerank_args(params)
args = params.parse_args()
logger.info(args)
rerank(args)
|
python
|
{
"resource": ""
}
|
q26307
|
Reranker.rerank
|
train
|
def rerank(self, hypotheses: Dict[str, Any], reference: str) -> Dict[str, Any]:
"""
Reranks a set of hypotheses that belong to one single reference
translation. Uses stable sorting.
:param hypotheses: Nbest translations.
:param reference: A single string with the actual reference translation.
:return: Nbest translations sorted by reranking scores.
"""
scores = [self.scoring_function(hypothesis, reference) for hypothesis in hypotheses['translations']]
ranking = list(np.argsort(scores, kind='mergesort')[::-1]) # descending
reranked_hypotheses = self._sort_by_ranking(hypotheses, ranking)
if self.return_score:
reranked_hypotheses['scores'] = [scores[i] for i in ranking]
return reranked_hypotheses
|
python
|
{
"resource": ""
}
|
q26308
|
Loss.get_loss
|
train
|
def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Returns loss and softmax output symbols given logits and integer-coded labels.
:param logits: Shape: (batch_size * target_seq_len, target_vocab_size).
:param labels: Shape: (batch_size * target_seq_len,).
:return: Loss symbol.
"""
raise NotImplementedError()
|
python
|
{
"resource": ""
}
|
q26309
|
CrossEntropyLoss.get_loss
|
train
|
def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Returns loss symbol given logits and integer-coded labels.
:param logits: Shape: (batch_size * target_seq_len, target_vocab_size).
:param labels: Shape: (batch_size * target_seq_len,).
:return: Loss symbol.
"""
if self.loss_config.normalization_type == C.LOSS_NORM_VALID:
normalization = "valid"
elif self.loss_config.normalization_type == C.LOSS_NORM_BATCH:
normalization = "null"
else:
raise ValueError("Unknown loss normalization type: %s" % self.loss_config.normalization_type)
return mx.sym.SoftmaxOutput(data=logits,
label=labels,
ignore_label=self.ignore_label,
use_ignore=True,
normalization=normalization,
smooth_alpha=self.loss_config.label_smoothing,
name=self.name)
|
python
|
{
"resource": ""
}
|
q26310
|
MSELoss.get_loss
|
train
|
def get_loss(self, pred: mx.sym.Symbol, labels: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Returns the MSE loss symbol given predictions and target values.
:param pred: Predictions. Shape: (batch_size, 1).
:param labels: Targets. Shape: (batch_size,).
:return: Loss symbol.
"""
labels = mx.sym.reshape(labels, shape=(-1, 1))
loss_value = self.loss_config.length_task_weight / 2 * mx.sym.square(pred - labels)
loss_value = mx.sym.MakeLoss(data=loss_value,
normalization='batch',
name=self.name)
return loss_value
|
python
|
{
"resource": ""
}
|
q26311
|
LengthRatioMSEMetric.update_dict
|
train
|
def update_dict(self, label: Dict, pred: Dict):
"""
If label is missing the right name, copy it from the prediction.
"""
if not set(self.label_names).issubset(set(label.keys())):
label.update({name:pred[name] for name in self.label_names})
super().update_dict(label, pred)
|
python
|
{
"resource": ""
}
|
q26312
|
check_version
|
train
|
def check_version(version: str):
"""
Checks given version against code version and determines compatibility.
Throws if versions are incompatible.
:param version: Given version.
"""
code_version = parse_version(__version__)
given_version = parse_version(version)
check_condition(code_version[0] == given_version[0],
"Given release version (%s) does not match release code version (%s)" % (version, __version__))
check_condition(code_version[1] == given_version[1],
"Given major version (%s) does not match major code version (%s)" % (version, __version__))
|
python
|
{
"resource": ""
}
|
q26313
|
load_version
|
train
|
def load_version(fname: str) -> str:
"""
Loads version from file.
:param fname: Name of file to load version from.
:return: Version string.
"""
if not os.path.exists(fname):
logger.warning("No version file found. Defaulting to 1.0.3")
return "1.0.3"
with open(fname) as inp:
return inp.read().strip()
|
python
|
{
"resource": ""
}
|
q26314
|
parse_version
|
train
|
def parse_version(version_string: str) -> Tuple[str, str, str]:
"""
Parse version string into release, major, minor version.
:param version_string: Version string.
:return: Tuple of strings.
"""
release, major, minor = version_string.split(".", 2)
return release, major, minor
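# Doctest-style example (added for illustration):
#
#   >>> parse_version("1.18.54")
#   ('1', '18', '54')
#   >>> parse_version("2.0.1.dev0")   # split(".", 2) keeps everything after the minor dot together
#   ('2', '0', '1.dev0')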
|
python
|
{
"resource": ""
}
|
q26315
|
log_basic_info
|
train
|
def log_basic_info(args) -> None:
"""
Log basic information like version number, arguments, etc.
:param args: Arguments as returned by argparse.
"""
log_sockeye_version(logger)
log_mxnet_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
|
python
|
{
"resource": ""
}
|
q26316
|
save_graph
|
train
|
def save_graph(symbol: mx.sym.Symbol, filename: str, hide_weights: bool = True):
"""
Dumps computation graph visualization to .pdf and .dot file.
:param symbol: The symbol representing the computation graph.
:param filename: The filename to save the graphic to.
:param hide_weights: If true the weights will not be shown.
"""
dot = mx.viz.plot_network(symbol, hide_weights=hide_weights)
dot.render(filename=filename)
|
python
|
{
"resource": ""
}
|
q26317
|
compute_lengths
|
train
|
def compute_lengths(sequence_data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Computes sequence lengths of PAD_ID-padded data in sequence_data.
:param sequence_data: Input data. Shape: (batch_size, seq_len).
:return: Length data. Shape: (batch_size,).
"""
return mx.sym.sum(sequence_data != C.PAD_ID, axis=1)
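# Example (added; assumes C.PAD_ID == 0, the usual padding id): for a padded batch
# [[4, 7, 9, 0, 0], [3, 0, 0, 0, 0]] the non-pad counts per row are [3, 1], i.e. the
# original sequence lengths.
#
#   data = mx.sym.Variable("data")     # (batch_size, seq_len), PAD_ID-padded
#   lengths = compute_lengths(data)    # (batch_size,)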
|
python
|
{
"resource": ""
}
|
q26318
|
save_params
|
train
|
def save_params(arg_params: Mapping[str, mx.nd.NDArray], fname: str,
aux_params: Optional[Mapping[str, mx.nd.NDArray]] = None):
"""
Saves the parameters to a file.
:param arg_params: Mapping from parameter names to the actual parameters.
:param fname: The file name to store the parameters in.
:param aux_params: Optional mapping from parameter names to the auxiliary parameters.
"""
save_dict = {('arg:%s' % k): v.as_in_context(mx.cpu()) for k, v in arg_params.items()}
if aux_params is not None:
save_dict.update({('aux:%s' % k): v.as_in_context(mx.cpu()) for k, v in aux_params.items()})
mx.nd.save(fname, save_dict)
|
python
|
{
"resource": ""
}
|
q26319
|
load_params
|
train
|
def load_params(fname: str) -> Tuple[Dict[str, mx.nd.NDArray], Dict[str, mx.nd.NDArray]]:
"""
Loads parameters from a file.
:param fname: The file containing the parameters.
:return: Mapping from parameter names to the actual parameters for both the arg parameters and the aux parameters.
"""
save_dict = mx.nd.load(fname)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
"""TODO(fhieber):
temporary weight split for models with combined weight for keys & values
in transformer source attention layers. This can be removed with the next major version change."""
if "att_enc_kv2h_weight" in name:
logger.info("Splitting '%s' parameters into separate k & v matrices.", name)
v_split = mx.nd.split(v, axis=0, num_outputs=2)
arg_params[name.replace('kv2h', "k2h")] = v_split[0]
arg_params[name.replace('kv2h', "v2h")] = v_split[1]
else:
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
|
python
|
{
"resource": ""
}
|
q26320
|
get_tokens
|
train
|
def get_tokens(line: str) -> Iterator[str]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
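# Doctest-style example (added for illustration):
#
#   >>> list(get_tokens("  this is   a line \n"))
#   ['this', 'is', 'a', 'line']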
|
python
|
{
"resource": ""
}
|
q26321
|
plot_attention
|
train
|
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
"""
Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written to.
"""
try:
import matplotlib
except ImportError:
raise RuntimeError("Please install matplotlib.")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens, rotation='vertical')
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plt.savefig(filename)
logger.info("Saved alignment visualization to " + filename)
|
python
|
{
"resource": ""
}
|
q26322
|
print_attention_text
|
train
|
def print_attention_text(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str],
threshold: float):
"""
Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result.
"""
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for i, f_i in enumerate(source_tokens): # type: ignore
sys.stdout.write(" |")
for j in range(len(target_tokens)):
align_prob = attention_matrix[j, i]
if align_prob > threshold:
sys.stdout.write("(*)")
elif align_prob > 0.4:
sys.stdout.write("(?)")
else:
sys.stdout.write(" ")
sys.stdout.write(" | %s\n" % f_i)
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for k in range(max(map(len, target_tokens))):
sys.stdout.write(" ")
for word in target_tokens:
letter = word[k] if len(word) > k else " "
sys.stdout.write(" %s " % letter)
sys.stdout.write("\n")
sys.stdout.write("\n")
|
python
|
{
"resource": ""
}
|
q26323
|
average_arrays
|
train
|
def average_arrays(arrays: List[mx.nd.NDArray]) -> mx.nd.NDArray:
"""
Take a list of arrays of the same shape and compute their element-wise average.
:param arrays: A list of NDArrays with the same shape that will be averaged.
:return: The average of the NDArrays in the same context as arrays[0].
"""
if not arrays:
raise ValueError("arrays is empty.")
if len(arrays) == 1:
return arrays[0]
check_condition(all(arrays[0].shape == a.shape for a in arrays), "nd array shapes do not match")
return mx.nd.add_n(*arrays) / len(arrays)
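# Example (added, not in the original source):
#
#   >>> average_arrays([mx.nd.array([1.0, 2.0]), mx.nd.array([3.0, 4.0])]).asnumpy()
#   array([2., 3.], dtype=float32)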
|
python
|
{
"resource": ""
}
|
q26324
|
query_nvidia_smi
|
train
|
def query_nvidia_smi(device_ids: List[int], result_queue: multiprocessing.Queue) -> None:
"""
Runs nvidia-smi to determine the memory usage.
:param device_ids: A list of devices for which the memory usage will be queried.
:param result_queue: The queue to which the result dictionary of device id mapping to a tuple of
(memory used, memory total) is added.
"""
device_id_strs = [str(device_id) for device_id in device_ids]
query = "--query-gpu=index,memory.used,memory.total"
format_arg = "--format=csv,noheader,nounits"
try:
sp = subprocess.Popen(['nvidia-smi', query, format_arg, "-i", ",".join(device_id_strs)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = sp.communicate()[0].decode("utf-8").rstrip().split("\n")
except OSError:
logger.exception("Failed calling nvidia-smi to query memory usage.")
result_queue.put({})
return
try:
memory_data = {}
for line in result:
gpu_id, mem_used, mem_total = line.split(",")
memory_data[int(gpu_id)] = (int(mem_used), int(mem_total))
result_queue.put(memory_data)
except:
logger.exception("Failed parsing nvidia-smi output %s", "\n".join(result))
result_queue.put({})
|
python
|
{
"resource": ""
}
|
q26325
|
get_gpu_memory_usage
|
train
|
def get_gpu_memory_usage(ctx: List[mx.context.Context]) -> Dict[int, Tuple[int, int]]:
"""
Returns used and total memory for GPUs identified by the given context list.
:param ctx: List of MXNet context devices.
:return: Dictionary of device id mapping to a tuple of (memory used, memory total).
"""
if isinstance(ctx, mx.context.Context):
ctx = [ctx]
ctx = [c for c in ctx if c.device_type == 'gpu']
if not ctx:
return {}
if shutil.which("nvidia-smi") is None:
logger.warning("Couldn't find nvidia-smi, therefore we assume no GPUs are available.")
return {}
device_ids = [c.device_id for c in ctx]
# Run from clean forkserver process to not leak any CUDA resources
mp_context = mp_utils.get_context()
result_queue = mp_context.Queue()
nvidia_smi_process = mp_context.Process(target=query_nvidia_smi, args=(device_ids, result_queue,))
nvidia_smi_process.start()
nvidia_smi_process.join()
memory_data = result_queue.get()
log_gpu_memory_usage(memory_data)
return memory_data
|
python
|
{
"resource": ""
}
|
q26326
|
acquire_gpus
|
train
|
def acquire_gpus(requested_device_ids: List[int], lock_dir: str = "/tmp",
retry_wait_min: int = 10, retry_wait_rand: int = 60,
num_gpus_available: Optional[int] = None):
"""
Acquire a number of GPUs in a transactional way. This method should be used inside a `with` statement.
Will try to acquire the full number of requested GPUs. If not enough GPUs are currently
available, all locks will be released and we wait before retrying. Retries continue until enough
GPUs become available.
:param requested_device_ids: The requested device ids, each number is either negative indicating the number of GPUs
that will be allocated, or positive indicating we want to acquire a specific device id.
:param lock_dir: The directory for storing the lock file.
:param retry_wait_min: The minimum number of seconds to wait between retries.
:param retry_wait_rand: Randomly add between 0 and `retry_wait_rand` seconds to the wait time.
:param num_gpus_available: The number of GPUs available, if None we will call get_num_gpus().
:return: yields a list of GPU ids.
"""
if num_gpus_available is None:
num_gpus_available = get_num_gpus()
if num_gpus_available == 0:
raise RuntimeError("Can not acquire GPU, as no GPUs were found on this machine.")
if not os.path.exists(lock_dir):
raise IOError("Lock directory %s does not exist." % lock_dir)
if not os.access(lock_dir, os.W_OK):
raise IOError("Lock directory %s is not writeable." % lock_dir)
# split the device ids into the specific ids requested and count up the number of arbitrary ids we want
# e.g. device_ids = [-3, 2, 5, 7, -5] means we want to acquire device 2, 5 and 7 plus 8 other devices.
specific_device_ids = set() # type: Set[int]
num_arbitrary_device_ids = 0
for device_id in requested_device_ids:
if device_id < 0:
num_gpus = -device_id
num_arbitrary_device_ids += num_gpus
else:
if device_id in specific_device_ids:
raise ValueError("Requested GPU %d twice." % device_id)
specific_device_ids.add(device_id)
# make sure we have enough GPUs available
num_gpus_requested = len(specific_device_ids) + num_arbitrary_device_ids
if num_gpus_requested > num_gpus_available:
raise ValueError("Requested %d GPUs, but only %d are available." % (num_gpus_requested, num_gpus_available))
logger.info("Attempting to acquire %d GPUs of %d GPUs. The requested devices are: %s",
num_gpus_requested, num_gpus_available, str(requested_device_ids))
# note: it's important to first allocate the specific device ids and then the others to not deadlock ourselves.
# for specific device ids we just have the device id itself as a candidate
candidates_to_request = [[device_id] for device_id in specific_device_ids]
# for the arbitrary device ids we take all remaining device ids as a list of candidates
remaining_device_ids = [device_id for device_id in range(num_gpus_available)
if device_id not in specific_device_ids]
candidates_to_request += [remaining_device_ids for _ in range(num_arbitrary_device_ids)]
while True:
with ExitStack() as exit_stack:
any_failed = False
acquired_gpus = [] # type: List[int]
with GpuFileLock(candidates=["master_lock"], lock_dir=lock_dir) as master_lock: # type: str
# Only one process, determined by the master lock, can try acquiring gpu locks at a time.
# This will make sure that we use consecutive device ids whenever possible.
if master_lock is not None:
for candidates in candidates_to_request:
gpu_id = exit_stack.enter_context(GpuFileLock(candidates=candidates, lock_dir=lock_dir))
if gpu_id is not None:
acquired_gpus.append(cast(int, gpu_id))
else:
if len(candidates) == 1:
logger.info("Could not acquire GPU %d. It's currently locked.", candidates[0])
any_failed = True
break
if master_lock is not None and not any_failed:
try:
yield acquired_gpus
except: # pylint: disable=try-except-raise
raise
return
# randomize so that multiple processes starting at the same time don't retry at a similar point in time
if retry_wait_rand > 0:
retry_wait_actual = retry_wait_min + random.randint(0, retry_wait_rand)
else:
retry_wait_actual = retry_wait_min
if master_lock is None:
logger.info("Another process is acquiring GPUs at the moment, will try again in %ss." % retry_wait_actual)
else:
logger.info("Not enough GPUs available, will try again in %ss." % retry_wait_actual)
time.sleep(retry_wait_actual)
|
python
|
{
"resource": ""
}
|
q26327
|
parse_metrics_line
|
train
|
def parse_metrics_line(line_number: int, line: str) -> Dict[str, Any]:
"""
Parse a line of metrics into a mappings of key and values.
:param line_number: Line number, used to check that it matches the checkpoint stored on the line.
:param line: A line from the Sockeye metrics file.
:return: Dictionary mapping metric names (e.g. perplexity-train) to values.
"""
fields = line.split('\t')
checkpoint = int(fields[0])
check_condition(line_number == checkpoint,
"Line (%d) and loaded checkpoint (%d) do not align." % (line_number, checkpoint))
metric = dict() # type: Dict[str, Any]
for field in fields[1:]:
key, value = field.split("=", 1)
if value == 'True' or value == 'False':
metric[key] = (value == 'True')
else:
metric[key] = float(value)
return metric
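# Example (added for illustration): a metrics line is "<checkpoint>\t<key>=<value>..."
# with boolean and float values, e.g.:
#
#   >>> parse_metrics_line(1, "1\tperplexity-train=12.5\tconverged=False")
#   {'perplexity-train': 12.5, 'converged': False}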
|
python
|
{
"resource": ""
}
|
q26328
|
read_metrics_file
|
train
|
def read_metrics_file(path: str) -> List[Dict[str, Any]]:
"""
Reads a metrics file and returns a list of mappings from metric names to values, one per checkpoint.
:param path: File to read metric values from.
:return: List of dictionaries mapping metric names (e.g. perplexity-train) to values.
"""
with open(path) as fin:
metrics = [parse_metrics_line(i, line.strip()) for i, line in enumerate(fin, 1)]
return metrics
|
python
|
{
"resource": ""
}
|
q26329
|
write_metrics_file
|
train
|
def write_metrics_file(metrics: List[Dict[str, Any]], path: str):
"""
Write metrics data to tab-separated file.
:param metrics: metrics data.
:param path: Path to write to.
"""
with open(path, 'w') as metrics_out:
for checkpoint, metric_dict in enumerate(metrics, 1):
metrics_str = "\t".join(["{}={}".format(name, value) for name, value in sorted(metric_dict.items())])
metrics_out.write("{}\t{}\n".format(checkpoint, metrics_str))
|
python
|
{
"resource": ""
}
|
q26330
|
grouper
|
train
|
def grouper(iterable: Iterable, size: int) -> Iterable:
"""
Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks.
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk
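# Doctest-style example (added): the final chunk may be shorter than `size`.
#
#   >>> list(grouper(range(5), 2))
#   [[0, 1], [2, 3], [4]]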
|
python
|
{
"resource": ""
}
|
q26331
|
metric_value_is_better
|
train
|
def metric_value_is_better(new: float, old: float, metric: str) -> bool:
"""
Returns true if new value is strictly better than old for given metric.
"""
if C.METRIC_MAXIMIZE[metric]:
return new > old
else:
return new < old
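# Example (added; assumes C.METRIC_MAXIMIZE maps 'bleu' to True and 'perplexity' to
# False, as in the accompanying constants module):
#
#   metric_value_is_better(25.0, 24.0, 'bleu')        # True  (higher BLEU is better)
#   metric_value_is_better(10.0, 12.0, 'perplexity')  # True  (lower perplexity is better)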
|
python
|
{
"resource": ""
}
|
q26332
|
cleanup_params_files
|
train
|
def cleanup_params_files(output_folder: str, max_to_keep: int, checkpoint: int, best_checkpoint: int, keep_first: bool):
"""
Deletes oldest parameter files from a model folder.
:param output_folder: Folder where param files are located.
:param max_to_keep: Maximum number of files to keep, negative to keep all.
:param checkpoint: Current checkpoint (i.e. index of last params file created).
:param best_checkpoint: Best checkpoint. The parameter file corresponding to this checkpoint will not be deleted.
:param keep_first: Don't delete the first checkpoint.
"""
if max_to_keep <= 0:
return
existing_files = glob.glob(os.path.join(output_folder, C.PARAMS_PREFIX + "*"))
params_name_with_dir = os.path.join(output_folder, C.PARAMS_NAME)
for n in range(1 if keep_first else 0, max(1, checkpoint - max_to_keep + 1)):
if n != best_checkpoint:
param_fname_n = params_name_with_dir % n
if param_fname_n in existing_files:
os.remove(param_fname_n)
|
python
|
{
"resource": ""
}
|
q26333
|
cast_conditionally
|
train
|
def cast_conditionally(data: mx.sym.Symbol, dtype: str) -> mx.sym.Symbol:
"""
Workaround until the no-op cast is fixed in the MXNet codebase.
Creates cast symbol only if dtype is different from default one, i.e. float32.
:param data: Input symbol.
:param dtype: Target dtype.
:return: Cast symbol or just data symbol.
"""
if dtype != C.DTYPE_FP32:
return mx.sym.cast(data=data, dtype=dtype)
return data
|
python
|
{
"resource": ""
}
|
q26334
|
inflect
|
train
|
def inflect(word: str,
count: int):
"""
Minimal inflection module.
:param word: The word to inflect.
:param count: The count.
:return: The word, perhaps inflected for number.
"""
if word in ['time', 'sentence']:
return word if count == 1 else word + 's'
elif word == 'was':
return 'was' if count == 1 else 'were'
else:
return word + '(s)'
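# Doctest-style examples (added for illustration):
#
#   >>> inflect('sentence', 2)
#   'sentences'
#   >>> inflect('was', 3)
#   'were'
#   >>> inflect('token', 2)   # unknown words fall back to the generic '(s)' suffix
#   'token(s)'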
|
python
|
{
"resource": ""
}
|
q26335
|
get_coverage
|
train
|
def get_coverage(config: CoverageConfig) -> 'Coverage':
"""
Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage.
"""
if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY:
utils.check_condition(config.num_hidden == 1, "Count or fertility coverage requires coverage_num_hidden==1")
if config.type == C.GRU_TYPE:
return GRUCoverage(config.num_hidden, config.layer_normalization)
elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}:
return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization)
elif config.type == C.COVERAGE_COUNT:
return CountCoverage()
elif config.type == C.COVERAGE_FERTILITY:
return FertilityCoverage(config.max_fertility)
else:
raise ValueError("Unknown coverage type %s" % config.type)
|
python
|
{
"resource": ""
}
|
q26336
|
mask_coverage
|
train
|
def mask_coverage(coverage: mx.sym.Symbol, source_length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Masks all coverage scores that are outside the actual sequence.
:param coverage: Input coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
:param source_length: Source length. Shape: (batch_size,).
:return: Masked coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
"""
return mx.sym.SequenceMask(data=coverage, axis=1, use_sequence_length=True, sequence_length=source_length)
|
python
|
{
"resource": ""
}
|
q26337
|
bin_open
|
train
|
def bin_open(fname: str):
"""
Returns a file descriptor for a plain text or gzipped file, binary read mode
for subprocess interaction.
:param fname: The filename to open.
:return: File descriptor in binary read mode.
"""
if fname.endswith(".gz"):
return gzip.open(fname, "rb")
return open(fname, "rb")
|
python
|
{
"resource": ""
}
|
q26338
|
check_git
|
train
|
def check_git():
"""Check if git command is available."""
try:
with open(os.devnull, "wb") as devnull:
subprocess.check_call(["git", "--version"], stdout=devnull, stderr=devnull)
except:
raise RuntimeError("Please make sure git is installed and on your path.")
|
python
|
{
"resource": ""
}
|
q26339
|
checkout_subword_nmt
|
train
|
def checkout_subword_nmt(workspace_dir: str):
"""
Checkout subword-nmt implementation of byte-pair encoding.
:param workspace_dir: Workspace third-party directory.
"""
# Prerequisites
check_git()
# Check cache
dest = os.path.join(workspace_dir, DIR_THIRD_PARTY, SUBWORD_NMT_DEST)
if confirm_checkout(dest, SUBWORD_NMT_COMMIT):
logging.info("Usable: %s", dest)
return
# Need to (re-)checkout
if os.path.exists(dest):
shutil.rmtree(dest)
logging.info("Checkout: %s -> %s", SUBWORD_NMT_REPO, dest)
log_fname = os.path.join(workspace_dir, DIR_LOGS, "checkout.{}.{}.log".format(SUBWORD_NMT_DEST, os.getpid()))
with open(log_fname, "wb") as log:
logging.info("Log: %s", log_fname)
subprocess.call(["git", "clone", SUBWORD_NMT_REPO, dest], stdout=log, stderr=log)
subprocess.call(["git", "checkout", SUBWORD_NMT_COMMIT], cwd=dest, stdout=log, stderr=log)
|
python
|
{
"resource": ""
}
|
q26340
|
confirm_checkout
|
train
|
def confirm_checkout(dest: str, commit: str) -> bool:
"""
Confirm that git repository is checked out.
:param dest: Local directory for checkout.
:param commit: Git commit.
:return: True if checkout is usable.
"""
usable = False
if os.path.exists(dest):
try:
rev = subprocess.check_output(["git", "rev-parse", "--verify", "HEAD"], cwd=dest).decode("utf-8").strip()
usable = (rev == commit)
except subprocess.CalledProcessError:
pass
if not usable:
logging.info("Problem with %s, requires new checkout.", dest)
return usable
|
python
|
{
"resource": ""
}
|
q26341
|
call_moses_tokenizer
|
train
|
def call_moses_tokenizer(workspace_dir: str,
input_fname: str,
output_fname: str,
lang_code: str,
num_threads: int = 4):
"""
Call Moses tokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of raw input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, gzipped.
:param lang_code: Language code for rules and non-breaking prefixes.
:param num_threads: Number of threads to use.
"""
tokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"tokenizer.perl")
with bin_open(input_fname) as inp, gzip.open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
tokenizer = subprocess.Popen(["perl", tokenizer_fname, "-l", lang_code, "-threads", str(num_threads)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
tokenizer_thread = threading.Thread(target=copy_out, args=(tokenizer.stdout, out))
tokenizer_thread.start()
for line in inp:
tokenizer.stdin.write(line)
tokenizer.stdin.close()
tokenizer_thread.join()
tokenizer.wait()
|
python
|
{
"resource": ""
}
|
q26342
|
call_moses_detokenizer
|
train
|
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str, lang_code: Optional[str] = None):
"""
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of detokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the detokenizer to default to English.
"""
detokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"detokenizer.perl")
with bin_open(input_fname) as inp, open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
command = ["perl", detokenizer_fname]
if lang_code:
command.append("-l")
command.append(lang_code)
detokenizer = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
detokenizer_thread = threading.Thread(target=copy_out, args=(detokenizer.stdout, out))
detokenizer_thread.start()
for line in inp:
detokenizer.stdin.write(line)
detokenizer.stdin.close()
detokenizer_thread.join()
detokenizer.wait()
|
python
|
{
"resource": ""
}
|
q26343
|
call_learn_bpe
|
train
|
def call_learn_bpe(workspace_dir: str, source_fname: str, target_fname: str, model_fname: str, num_ops: int = 32000):
"""
Call script to learn byte-pair encoding model.
:param workspace_dir: Workspace third-party directory where subword-nmt is
checked out.
:param source_fname: Path of source corpus file, plain text or gzipped.
:param target_fname: Path of target corpus file, plain text or gzipped.
:param model_fname: Path to write out model.
:param num_ops: Number of operations.
"""
learn_bpe_fname = os.path.join(workspace_dir, DIR_THIRD_PARTY, SUBWORD_NMT_DEST, "learn_bpe.py")
with bin_open(source_fname) as src_in, bin_open(target_fname) as trg_in, open(model_fname, "wb") as out:
learn_bpe = subprocess.Popen([sys.executable, learn_bpe_fname, "-s", str(num_ops)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
learn_bpe_thread = threading.Thread(target=copy_out, args=(learn_bpe.stdout, out))
learn_bpe_thread.start()
for inp in (src_in, trg_in):
for line in inp:
learn_bpe.stdin.write(line)
learn_bpe.stdin.close()
learn_bpe_thread.join()
learn_bpe.wait()
|
python
|
{
"resource": ""
}
|
q26344
|
call_apply_bpe
|
train
|
def call_apply_bpe(workspace_dir: str, input_fname: str, output_fname: str, model_fname: str):
"""
Call BPE apply script.
:param workspace_dir: Workspace directory where subword-nmt is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of byte-pair encoded output file, gzipped.
:param model_fname: Path of BPE model file (codes).
"""
apply_bpe_fname = os.path.join(workspace_dir, DIR_THIRD_PARTY, SUBWORD_NMT_DEST, "apply_bpe.py")
with bin_open(input_fname) as inp, gzip.open(output_fname, "wb") as out:
apply_bpe = subprocess.Popen([sys.executable, apply_bpe_fname, "-c", model_fname],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
apply_bpe_thread = threading.Thread(target=copy_out, args=(apply_bpe.stdout, out, True))
apply_bpe_thread.start()
for line in inp:
# Use an empty line placeholder to avoid blank line duplication
# issues with BPE script
if not line.strip():
line = PLACEHOLDER + b"\n"
apply_bpe.stdin.write(line)
apply_bpe.stdin.close()
apply_bpe_thread.join()
apply_bpe.wait()
|
python
|
{
"resource": ""
}
|
q26345
|
merge_bpe
|
train
|
def merge_bpe(input_fname: str, output_fname: str):
"""
Merge byte-pair encoded sub-words.
:param input_fname: Path of byte-pair encoded input file, plain text or
gzipped.
:param output_fname: Path of tokenized output file, plain text.
"""
with utils.smart_open(input_fname, "r") as inp, open(output_fname, "w", encoding="utf-8") as out:
for line in inp:
# Merge on special markers and strip stray markers (end of line)
merged = line.replace(SUBWORD_SPECIAL + " ", "").replace(SUBWORD_SPECIAL, "")
out.write(merged)
|
python
|
{
"resource": ""
}
|
q26346
|
copy_out
|
train
|
def copy_out(source: Iterable[bytes], dest: io.BytesIO, use_placeholders: bool = False):
"""
Copy lines from source to destination.
:param source: Source line iterable.
:param dest: Destination open file.
:param use_placeholders: When true, convert lines containing placeholders to
empty lines and drop true empty lines (assumed to be
spuriously generated).
"""
for line in source:
if use_placeholders:
# True empty lines are assumed to be spurious as the placeholder
# should be passed through
if not line.strip():
continue
if line.startswith(PLACEHOLDER):
line = b"\n"
dest.write(line)
|
python
|
{
"resource": ""
}
|
q26347
|
raw_corpus_bleu
|
train
|
def raw_corpus_bleu(hypotheses: Iterable[str], references: Iterable[str], offset: Optional[float] = 0.01) -> float:
"""
Simple wrapper around sacreBLEU's BLEU without tokenization and smoothing.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:param offset: Smoothing constant.
:return: BLEU score as float between 0 and 1.
"""
return sacrebleu.raw_corpus_bleu(hypotheses, [references], smooth_floor=offset).score / 100.0
|
python
|
{
"resource": ""
}
|
q26348
|
raw_corpus_chrf
|
train
|
def raw_corpus_chrf(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around sacreBLEU's chrF implementation, without tokenization.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: chrF score as float between 0 and 1.
"""
return sacrebleu.corpus_chrf(hypotheses, references, order=sacrebleu.CHRF_ORDER, beta=sacrebleu.CHRF_BETA,
remove_whitespace=True)
|
python
|
{
"resource": ""
}
|
q26349
|
raw_corpus_rouge1
|
train
|
def raw_corpus_rouge1(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
"""
return rouge.rouge_1(hypotheses, references)
|
python
|
{
"resource": ""
}
|
q26350
|
raw_corpus_rouge2
|
train
|
def raw_corpus_rouge2(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-2 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-2 score as float between 0 and 1.
"""
return rouge.rouge_2(hypotheses, references)
|
python
|
{
"resource": ""
}
|
q26351
|
raw_corpus_rougel
|
train
|
def raw_corpus_rougel(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-L implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-L score as float between 0 and 1.
"""
return rouge.rouge_l(hypotheses, references)
|
python
|
{
"resource": ""
}
|
q26352
|
raw_corpus_length_ratio
|
train
|
def raw_corpus_length_ratio(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around length ratio implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: Length ratio score as float.
"""
ratios = [len(h.split())/len(r.split()) for h, r in zip(hypotheses, references)]
return sum(ratios)/len(ratios) if len(ratios) else 0.0
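# Doctest-style example (added): the ratio is averaged per sentence pair, not over
# total token counts.
#
#   >>> raw_corpus_length_ratio(["a b c", "x y"], ["a b", "x y"])
#   1.25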
|
python
|
{
"resource": ""
}
|
q26353
|
ImageCaptioner.translate
|
train
|
def translate(self, trans_inputs: List[TranslatorInput]) -> List[TranslatorOutput]:
"""
Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs.
Splits oversized sentences to sentence chunks of size less than max_input_length.
:param trans_inputs: List of TranslatorInputs as returned by make_input().
:return: List of translation results.
"""
batch_size = self.max_batch_size
# translate in batch-sized blocks over input chunks
translations = []
for batch_id, batch in enumerate(utils.grouper(trans_inputs, batch_size)):
logger.debug("Translating batch %d", batch_id)
# underfilled batch will be filled to a full batch size with copies of the 1st input
rest = batch_size - len(batch)
if rest > 0:
logger.debug("Extending the last batch to the full batch size (%d)", batch_size)
batch = batch + [batch[0]] * rest
batch_translations = self._translate_nd(*self._get_inference_input(batch))
# truncate to remove filler translations
if rest > 0:
batch_translations = batch_translations[:-rest]
translations.extend(batch_translations)
# Concatenate results
results = [] # type: List[TranslatorOutput]
for trans_input, translation in zip(trans_inputs, translations):
results.append(self._make_result(trans_input, translation))
return results
|
python
|
{
"resource": ""
}
|
q26354
|
ImageCaptioner._get_inference_input
|
train
|
def _get_inference_input(self,
trans_inputs: List[TranslatorInput]) -> Tuple[mx.nd.NDArray,
int,
Optional[lexicon.TopKLexicon],
List[
Optional[constrained.RawConstraintList]],
List[
Optional[constrained.RawConstraintList]],
mx.nd.NDArray]:
"""
Returns an NDArray of images, the corresponding bucket key, and an NDArray of maximum output lengths
for each sentence in the batch.
:param trans_inputs: List of TranslatorInputs. The path of the image/feature is in the token field.
:return: NDArray of images, bucket key, an optional restricted output lexicon, a list of raw
constraint lists, a list of raw avoid lists, and an NDArray of maximum output lengths.
"""
batch_size = len(trans_inputs)
image_paths = [None for _ in range(batch_size)] # type: List[Optional[str]]
restrict_lexicon = None # type: Optional[lexicon.TopKLexicon]
raw_constraints = [None for _ in range(batch_size)] # type: List[Optional[constrained.RawConstraintList]]
raw_avoid_list = [None for _ in range(batch_size)] # type: List[Optional[constrained.RawConstraintList]]
for j, trans_input in enumerate(trans_inputs):
# Join relative path with absolute
path = trans_input.tokens[0]
if self.source_root is not None:
path = os.path.join(self.source_root, path)
image_paths[j] = path
# Preprocess constraints
if trans_input.constraints is not None:
raw_constraints[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in
trans_input.constraints]
# Read data and zero pad if necessary
images = self.data_loader(image_paths)
images = utils_image.zero_pad_features(images, self.source_image_size)
max_input_length = 0
max_output_lengths = [self.models[0].get_max_output_length(max_input_length)] * len(image_paths)
return mx.nd.array(images), max_input_length, restrict_lexicon, raw_constraints, raw_avoid_list, \
mx.nd.array(max_output_lengths, ctx=self.context, dtype='int32')
|
python
|
{
"resource": ""
}
|
q26355
|
TopKLexicon.save
|
train
|
def save(self, path: str):
"""
Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file.
"""
with open(path, 'wb') as out:
np.save(out, self.lex)
logger.info("Saved top-k lexicon to \"%s\"", path)
|
python
|
{
"resource": ""
}
|
q26356
|
TopKLexicon.load
|
train
|
def load(self, path: str, k: Optional[int] = None):
"""
Load lexicon from Numpy array file. The top-k target ids will be sorted by increasing target id.
:param path: Path to Numpy array file.
:param k: Optionally load fewer items than stored in path.
"""
load_time_start = time.time()
with open(path, 'rb') as inp:
_lex = np.load(inp)
loaded_k = _lex.shape[1]
if k is not None:
top_k = min(k, loaded_k)
if k > loaded_k:
logger.warning("Can not load top-%d translations from lexicon that "
"contains at most %d entries per source.", k, loaded_k)
else:
top_k = loaded_k
self.lex = np.zeros((len(self.vocab_source), top_k), dtype=_lex.dtype)
for src_id, trg_ids in enumerate(_lex):
self.lex[src_id, :] = np.sort(trg_ids[:top_k])
load_time = time.time() - load_time_start
logger.info("Loaded top-%d lexicon from \"%s\" in %.4fs.", top_k, path, load_time)
|
python
|
{
"resource": ""
}
|
q26357
|
TopKLexicon.get_trg_ids
|
train
|
def get_trg_ids(self, src_ids: np.ndarray) -> np.ndarray:
"""
Lookup possible target ids for input sequence of source ids.
:param src_ids: Sequence(s) of source ids (any shape).
:return: Possible target ids for source (unique sorted, always includes special symbols).
"""
# TODO: When MXNet adds support for set operations, we can migrate to avoid conversions to/from NumPy.
unique_src_ids = np.lib.arraysetops.unique(src_ids)
trg_ids = np.lib.arraysetops.union1d(self.always_allow, self.lex[unique_src_ids, :].reshape(-1))
return trg_ids
|
python
|
{
"resource": ""
}
|
q26358
|
setup_main_logger
|
train
|
def setup_main_logger(file_logging=True, console=True, path: Optional[str] = None, level=logging.INFO):
"""
Configures logging for the main application.
:param file_logging: Whether to log to a file.
:param console: Whether to log to the console.
:param path: Optional path to write logfile to.
:param level: Log level. Default: INFO.
"""
if file_logging and console:
log_config = LOGGING_CONFIGS["file_console"] # type: ignore
elif file_logging:
log_config = LOGGING_CONFIGS["file_only"]
elif console:
log_config = LOGGING_CONFIGS["console_only"]
else:
log_config = LOGGING_CONFIGS["none"]
if path:
log_config["handlers"]["rotating"]["filename"] = path # type: ignore
for _, handler_config in log_config['handlers'].items(): # type: ignore
handler_config['level'] = level
logging.config.dictConfig(log_config) # type: ignore
def exception_hook(exc_type, exc_value, exc_traceback):
if is_python34():
# Python3.4 does not seem to handle logger.exception() well
import traceback
traceback = "".join(traceback.format_tb(exc_traceback)) + exc_type.name
logging.error("Uncaught exception\n%s", traceback)
else:
logging.exception("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = exception_hook
|
python
|
{
"resource": ""
}
|
q26359
|
identify_raw_files
|
train
|
def identify_raw_files(task: Task, test_mode: bool = False) -> List[str]:
"""
Identify raw files that need to be downloaded for a given task.
:param task: Sequence-to-sequence task.
:param test_mode: Run in test mode, only downloading test data.
:return: List of raw file names.
"""
raw_files = set()
all_sets = [task.test,] if test_mode else [task.train, task.dev, task.test]
for file_sets in all_sets:
for file_set in file_sets:
for fname in file_set[:2]:
raw_file = fname.split("/", 1)[0]
if raw_file not in RAW_FILES:
raise RuntimeError("Unknown raw file %s found in path %s" % (raw_file, fname))
raw_files.add(raw_file)
return sorted(raw_files)
|
python
|
{
"resource": ""
}
|
q26360
|
download_extract_raw_files
|
train
|
def download_extract_raw_files(names: List[str], cache_dir: str, dest_dir: str):
"""
Download and extract raw files, making use of a cache directory.
- Downloaded files are verified by MD5 sum.
- Extraction overwrites existing files.
:param names: List of raw file names in RAW_FILES.
:param cache_dir: Cache directory for downloading raw files.
:param dest_dir: Destination directory for extracting raw files.
"""
for name in names:
raw_file = RAW_FILES[name]
local_dir = os.path.join(cache_dir, name)
local_fname = os.path.join(local_dir, os.path.basename(raw_file.url))
# Download file if not present
if not os.path.exists(local_dir):
logging.info("Create: %s", local_dir)
os.makedirs(local_dir)
if not os.path.exists(local_fname):
logging.info("Download: %s -> %s", raw_file.url, local_fname)
urllib.request.urlretrieve(raw_file.url, local_fname)
# Check MD5 sum, attempt one re-download on mismatch
md5 = md5sum(local_fname)
if not md5 == raw_file.md5:
logging.info("MD5 mismatch for %s, attempt re-download %s", local_fname, raw_file.url)
urllib.request.urlretrieve(raw_file.url, local_fname)
md5 = md5sum(local_fname)
if not md5 == raw_file.md5:
raise RuntimeError("MD5 mismatch for %s after re-download. Check validity of %s"
% (local_fname, raw_file.url))
logging.info("Confirmed MD5: %s (%s)", local_fname, md5)
# Extract file(s), overwriting directory if exists
extract_path = os.path.join(dest_dir, name)
if os.path.exists(extract_path):
shutil.rmtree(extract_path)
os.makedirs(extract_path)
logging.info("Extract: %s -> %s", local_fname, extract_path)
if raw_file.archive_type == ARCHIVE_NONE:
os.symlink(local_fname, os.path.join(extract_path, os.path.basename(local_fname)))
elif raw_file.archive_type == ARCHIVE_TAR:
tar = tarfile.open(local_fname)
tar.extractall(path=extract_path)
elif raw_file.archive_type == ARCHIVE_ZIP:
zipf = zipfile.ZipFile(local_fname, "r")
zipf.extractall(path=extract_path)
else:
raise RuntimeError("Unknown archive type: %s" % raw_file.archive_type)
|
python
|
{
"resource": ""
}
|
q26361
|
md5sum
|
train
|
def md5sum(fname: str) -> str:
"""Compute MD5 sum of file."""
with open(fname, "rb") as inp:
md5 = hashlib.md5(inp.read()).hexdigest()
return md5
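# Usage sketch (added; 'corpus.tgz' and `expected_md5` are hypothetical):
#
#   digest = md5sum("corpus.tgz")
#   if digest != expected_md5:
#       raise RuntimeError("MD5 mismatch for corpus.tgz")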
|
python
|
{
"resource": ""
}
|
q26362
|
populate_parallel_text
|
train
|
def populate_parallel_text(extract_dir: str,
file_sets: List[Tuple[str, str, str]],
dest_prefix: str,
keep_separate: bool,
head_n: int = 0):
"""
Create raw parallel train, dev, or test files with a given prefix.
:param extract_dir: Directory where raw files (inputs) are extracted.
:param file_sets: Sets of files to use.
:param dest_prefix: Prefix for output files.
:param keep_separate: True if each file set (source-target pair) should have
its own file (used for test sets).
:param head_n: If N>0, use only the first N lines (used in test mode).
"""
source_out = None # type: IO[Any]
target_out = None # type: IO[Any]
lines_written = 0
# Single output file for each side
if not keep_separate:
source_dest = dest_prefix + SUFFIX_SRC_GZ
target_dest = dest_prefix + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for i, (source_fname, target_fname, text_type) in enumerate(file_sets):
# One output file per input file for each side
if keep_separate:
if source_out:
source_out.close()
if target_out:
target_out.close()
source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
source_out = gzip.open(source_dest, "wt", encoding="utf-8")
target_out = gzip.open(target_dest, "wt", encoding="utf-8")
for source_line, target_line in zip(
plain_text_iter(os.path.join(extract_dir, source_fname), text_type, DATA_SRC),
plain_text_iter(os.path.join(extract_dir, target_fname), text_type, DATA_TRG)):
# Only write N lines total if requested, but reset per file when
# keeping files separate
if head_n > 0 and lines_written >= head_n:
if keep_separate:
lines_written = 0
break
source_out.write("{}\n".format(source_line))
target_out.write("{}\n".format(target_line))
lines_written += 1
source_out.close()
target_out.close()
|
python
|
{
"resource": ""
}
|
q26363
|
copy_parallel_text
|
train
|
def copy_parallel_text(file_list: List[str], dest_prefix: str):
"""
Copy pre-compiled raw parallel files with a given prefix. Perform
whitespace character normalization to ensure that only ASCII newlines are
considered line breaks.
:param file_list: List of file pairs to use.
:param dest_prefix: Prefix for output files.
"""
# Group files into source-target pairs
file_sets = []
for i in range(0, len(file_list), 2):
file_sets.append((file_list[i], file_list[i + 1]))
multiple_sets = len(file_sets) > 1
for i, (source_fname, target_fname) in enumerate(file_sets):
if multiple_sets:
source_dest = dest_prefix + str(i) + "." + SUFFIX_SRC_GZ
target_dest = dest_prefix + str(i) + "." + SUFFIX_TRG_GZ
else:
source_dest = dest_prefix + SUFFIX_SRC_GZ
target_dest = dest_prefix + SUFFIX_TRG_GZ
logging.info("Populate: %s %s", source_dest, target_dest)
with gzip.open(source_dest, "wb") as source_out, gzip.open(target_dest, "wb") as target_out:
with third_party.bin_open(source_fname) as inp:
for line in inp:
line = (re.sub(r"\s", " ", line.decode("utf-8"))).encode("utf-8") + b"\n"
source_out.write(line)
with third_party.bin_open(target_fname) as inp:
for line in inp:
line = (re.sub(r"\s", " ", line.decode("utf-8"))).encode("utf-8") + b"\n"
target_out.write(line)
|
python
|
{
"resource": ""
}
|
q26364
|
renew_step_dir
|
train
|
def renew_step_dir(step_dir: str):
"""Delete step directory if exists and create, reporting actions."""
if os.path.exists(step_dir):
logging.info("Remove unfinished step %s", step_dir)
shutil.rmtree(step_dir)
logging.info("Create: %s", step_dir)
os.makedirs(step_dir)
|
python
|
{
"resource": ""
}
|
q26365
|
call_sockeye_train
|
train
|
def call_sockeye_train(model: str,
bpe_dir: str,
model_dir: str,
log_fname: str,
num_gpus: int,
test_mode: bool = False):
"""
Call sockeye.train with specified arguments on prepared inputs. Will resume
partial training or skip training if model is already finished. Record
command for future use.
:param model: Type of translation model to train.
:param bpe_dir: Directory of BPE-encoded input data.
:param model_dir: Model output directory.
:param log_fname: Location to write log file.
:param num_gpus: Number of GPUs to use for training (0 for CPU).
:param test_mode: Run in test mode, stopping after a small number of
updates.
"""
# Inputs and outputs
fnames = ["--source={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_SRC_GZ)),
"--target={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_TRG_GZ)),
"--validation-source={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_SRC_GZ)),
"--validation-target={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_TRG_GZ)),
"--output={}".format(model_dir)]
# Assemble command
command = [sys.executable, "-m", "sockeye.train"] + fnames + MODELS[model]
# Request GPUs or specify CPU
if num_gpus > 0:
command.append("--device-ids=-{}".format(num_gpus))
else:
command.append("--use-cpu")
# Test mode trains a smaller model for a small number of steps
if test_mode:
command += MODEL_TEST_ARGS[model]
command_fname = os.path.join(model_dir, FILE_COMMAND.format("sockeye.train"))
# Run unless training already finished
if not os.path.exists(command_fname):
# Call Sockeye training
with open(log_fname, "wb") as log:
logging.info("sockeye.train: %s", model_dir)
logging.info("Log: %s", log_fname)
logging.info("(This step can take several days. See log file or TensorBoard for progress)")
subprocess.check_call(command, stderr=log)
# Record successful command
logging.info("Command: %s", command_fname)
print_command(command, command_fname)
|
python
|
{
"resource": ""
}
|
q26366
|
call_sockeye_average
|
train
|
def call_sockeye_average(model_dir: str, log_fname: str):
"""
Call sockeye.average with reasonable defaults.
:param model_dir: Trained model directory.
:param log_fname: Location to write log file.
"""
params_best_fname = os.path.join(model_dir, C.PARAMS_BEST_NAME)
params_best_single_fname = os.path.join(model_dir, PARAMS_BEST_SINGLE)
params_average_fname = os.path.join(model_dir, PARAMS_AVERAGE)
command = [sys.executable,
"-m",
"sockeye.average",
"--metric={}".format(AVERAGE_METRIC),
"-n",
str(AVERAGE_NUM_CHECKPOINTS),
"--output={}".format(params_average_fname),
"--strategy={}".format(AVERAGE_STRATEGY),
model_dir]
command_fname = os.path.join(model_dir, FILE_COMMAND.format("sockeye.average"))
# Run average if not previously run
if not os.path.exists(command_fname):
# Re-link best point to best single point
os.symlink(os.path.basename(os.path.realpath(params_best_fname)), params_best_single_fname)
os.remove(params_best_fname)
# Call Sockeye average
with open(log_fname, "wb") as log:
logging.info("sockeye.average: %s", os.path.join(model_dir, params_best_fname))
logging.info("Log: %s", log_fname)
subprocess.check_call(command, stderr=log)
# Link averaged point as new best
os.symlink(PARAMS_AVERAGE, params_best_fname)
# Record successful command
logging.info("Command: %s", command_fname)
print_command(command, command_fname)
|
python
|
{
"resource": ""
}
|
q26367
|
call_sockeye_translate
|
train
|
def call_sockeye_translate(args: List[str],
input_fname: str,
output_fname: str,
model_dir: str,
log_fname: str,
use_cpu: bool):
"""
Call sockeye.translate with specified arguments using a trained model.
:param args: Command line arguments for sockeye.translate.
:param input_fname: Input file (byte-pair encoded).
:param output_fname: Raw decoder output file.
:param model_dir: Model output directory.
:param log_fname: Location to write log file.
:param use_cpu: Use CPU instead of GPU for decoding.
"""
# Inputs and outputs
fnames = ["--input={}".format(input_fname),
"--output={}".format(output_fname),
"--models={}".format(model_dir)]
# Assemble command
command = [sys.executable, "-m", "sockeye.translate"] + fnames + args
# Request GPUs or specify CPU
if use_cpu:
command.append("--use-cpu")
command_fname = output_fname + "." + SUFFIX_COMMAND
# Run unless translate already finished
if not os.path.exists(command_fname):
# Call Sockeye translate
with open(log_fname, "wb") as log:
logging.info("sockeye.translate: %s -> %s", input_fname, output_fname)
logging.info("Log: %s", log_fname)
subprocess.check_call(command, stderr=log)
# Cleanup redundant log file
try:
os.remove(output_fname + ".log")
except FileNotFoundError:
pass
# Record successful command
logging.info("Command: %s", command_fname)
print_command(command, command_fname)
|
python
|
{
"resource": ""
}
|
q26368
|
call_sacrebleu
|
train
|
def call_sacrebleu(input_fname: str, ref_fname: str, output_fname: str, log_fname: str, tokenized: bool = False):
"""
Call pip-installed sacrebleu on tokenized or detokenized inputs.
:param input_fname: Input translation file.
:param ref_fname: Reference translation file.
:param output_fname: Output score file.
:param log_fname: Location to write log file.
:param tokenized: Whether inputs are tokenized (or byte-pair encoded).
"""
# Assemble command
command = ["sacrebleu",
"--score-only",
"--input={}".format(input_fname),
ref_fname]
# Already tokenized?
if tokenized:
command.append("--tokenize=none")
# Call sacrebleu
with open(log_fname, "wb") as log:
logging.info("sacrebleu: %s -> %s", input_fname, output_fname)
logging.info("Log: %s", log_fname)
score = subprocess.check_output(command, stderr=log)
# Record successful score
with open(output_fname, "wb") as out:
out.write(score)
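
# Usage sketch (hypothetical file names; assumes the pip-installed sacrebleu
# executable is on PATH):
# call_sacrebleu(input_fname="decode/test.detok.out",
#                ref_fname="data/test.detok.ref",
#                output_fname="decode/test.bleu",
#                log_fname="logs/sacrebleu.log",
#                tokenized=False)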
|
python
|
{
"resource": ""
}
|
q26369
|
print_command
|
train
|
def print_command(command: List[str], fname: str):
"""
Format and print command to file.
:param command: Command in args list form.
:param fname: File name to write out.
"""
with open(fname, "w", encoding="utf-8") as out:
print(" \\\n".join(command), file=out)
|
python
|
{
"resource": ""
}
|
q26370
|
init_weight
|
train
|
def init_weight(weight: np.ndarray,
vocab_in: Dict[str, int],
vocab_out: Dict[str, int],
initializer: mx.initializer.Initializer=mx.init.Constant(value=0.0)) -> mx.nd.NDArray:
"""
Initialize vocabulary-sized weight by existing values given input and output vocabularies.
:param weight: Input weight.
:param vocab_in: Input vocabulary.
:param vocab_out: Output vocabulary.
:param initializer: MXNet initializer.
:return: Initialized output weight.
"""
shape = list(weight.shape)
shape[0] = len(vocab_out)
weight_init = mx.nd.empty(tuple(shape), dtype='float32')
weight_desc = mx.init.InitDesc("vocabulary_sized_weight")
initializer(weight_desc, weight_init)
for token in vocab_out:
if token in vocab_in:
weight_init[vocab_out[token]] = weight[vocab_in[token]]
return weight_init
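
# Usage sketch with illustrative values: rows are copied for tokens shared between
# the input and output vocabularies; tokens missing from the input vocabulary keep
# the initializer's default value (0.0). Assumes numpy and mxnet are importable.
import numpy as np
_example_weight = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype='float32')
_example_vocab_in = {"a": 0, "b": 1, "c": 2}
_example_vocab_out = {"a": 0, "c": 1, "b": 2, "d": 3}
_example_init = init_weight(_example_weight, _example_vocab_in, _example_vocab_out)
# _example_init[3] stays all zeros because "d" does not occur in the input vocabulary.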
|
python
|
{
"resource": ""
}
|
q26371
|
load_weight
|
train
|
def load_weight(weight_file: str,
weight_name: str,
weight_file_cache: Dict[str, Dict]) -> mx.nd.NDArray:
"""
    Load a weight from a file, or from the cache if the file was loaded before.
:param weight_file: Weight file.
:param weight_name: Weight name.
:param weight_file_cache: Cache of loaded files.
:return: Loaded weight.
"""
logger.info('Loading input weight file: %s', weight_file)
if weight_file.endswith(".npy"):
return np.load(weight_file)
elif weight_file.endswith(".npz"):
if weight_file not in weight_file_cache:
weight_file_cache[weight_file] = np.load(weight_file)
return weight_file_cache[weight_file][weight_name]
else:
if weight_file not in weight_file_cache:
weight_file_cache[weight_file] = mx.nd.load(weight_file)
return weight_file_cache[weight_file]['arg:%s' % weight_name].asnumpy()
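
# Usage sketch (hypothetical file and weight names): the cache dict avoids
# re-reading an .npz or MXNet .params file when several weights come from it.
# _cache = {}  # type: Dict[str, Dict]
# _src_embed = load_weight("pretrained.npz", "source_embed_weight", _cache)
# _trg_embed = load_weight("pretrained.npz", "target_embed_weight", _cache)  # served from the cache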
|
python
|
{
"resource": ""
}
|
q26372
|
main
|
train
|
def main():
"""
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
"""
setup_main_logger(console=True, file_logging=False)
params = argparse.ArgumentParser(description='Quick usage: python3 -m sockeye.init_embedding '
'-w embed-in-src.npy embed-in-tgt.npy '
'-i vocab-in-src.json vocab-in-tgt.json '
'-o vocab-out-src.json vocab-out-tgt.json '
'-n source_embed_weight target_embed_weight '
'-f params.init')
arguments.add_init_embedding_args(params)
args = params.parse_args()
init_embeddings(args)
|
python
|
{
"resource": ""
}
|
q26373
|
ScoringModel.run
|
train
|
def run(self, batch: mx.io.DataBatch) -> List[mx.nd.NDArray]:
"""
Runs the forward pass and returns the outputs.
:param batch: The batch to run.
        :return: List of output NDArrays from the forward pass.
"""
self.module.forward(batch, is_train=False)
return self.module.get_outputs()
|
python
|
{
"resource": ""
}
|
q26374
|
vocab_to_json
|
train
|
def vocab_to_json(vocab: Vocab, path: str):
"""
Saves vocabulary in human-readable json.
:param vocab: Vocabulary mapping.
:param path: Output file path.
"""
with open(path, "w", encoding=C.VOCAB_ENCODING) as out:
json.dump(vocab, out, indent=4, ensure_ascii=False)
logger.info('Vocabulary saved to "%s"', path)
|
python
|
{
"resource": ""
}
|
q26375
|
vocab_from_json
|
train
|
def vocab_from_json(path: str, encoding: str = C.VOCAB_ENCODING) -> Vocab:
"""
    Loads vocabulary from a json file.
:param path: Path to json file containing the vocabulary.
:param encoding: Vocabulary encoding.
:return: The loaded vocabulary.
"""
with open(path, encoding=encoding) as inp:
vocab = json.load(inp)
utils.check_condition(is_valid_vocab(vocab), "Vocabulary %s not valid." % path)
logger.info('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
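
# Usage sketch (hypothetical path, illustrative minimal vocabulary): together with
# vocab_to_json this forms a simple round trip; the loaded mapping is checked by
# is_valid_vocab.
# vocab_to_json({"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3}, "vocab.src.0.json")
# _vocab = vocab_from_json("vocab.src.0.json")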
|
python
|
{
"resource": ""
}
|
q26376
|
save_target_vocab
|
train
|
def save_target_vocab(target_vocab: Vocab, folder: str):
"""
Saves target vocabulary to folder.
:param target_vocab: Target vocabulary.
:param folder: Destination folder.
"""
vocab_to_json(target_vocab, os.path.join(folder, C.VOCAB_TRG_NAME % 0))
|
python
|
{
"resource": ""
}
|
q26377
|
load_source_vocabs
|
train
|
def load_source_vocabs(folder: str) -> List[Vocab]:
"""
Loads source vocabularies from folder. The first element in the list is the primary source vocabulary.
Other elements correspond to optional additional source factor vocabularies found in folder.
:param folder: Source folder.
:return: List of vocabularies.
"""
return [vocab_from_json(os.path.join(folder, fname)) for fname in
sorted([f for f in os.listdir(folder) if f.startswith(C.VOCAB_SRC_PREFIX)])]
|
python
|
{
"resource": ""
}
|
q26378
|
load_target_vocab
|
train
|
def load_target_vocab(folder: str) -> Vocab:
"""
Loads target vocabulary from folder.
    :param folder: Folder containing the target vocabulary file.
    :return: Target vocabulary.
"""
return vocab_from_json(os.path.join(folder, C.VOCAB_TRG_NAME % 0))
|
python
|
{
"resource": ""
}
|
q26379
|
load_or_create_vocab
|
train
|
def load_or_create_vocab(data: str, vocab_path: Optional[str], num_words: int, word_min_count: int,
pad_to_multiple_of: Optional[int] = None) -> Vocab:
"""
If the vocabulary path is defined, the vocabulary is loaded from the path.
Otherwise, it is built from the data file. No writing to disk occurs.
"""
if vocab_path is None:
return build_from_paths(paths=[data], num_words=num_words, min_count=word_min_count,
pad_to_multiple_of=pad_to_multiple_of)
else:
return vocab_from_json(vocab_path)
|
python
|
{
"resource": ""
}
|
q26380
|
reverse_vocab
|
train
|
def reverse_vocab(vocab: Vocab) -> InverseVocab:
"""
Returns value-to-key mapping from key-to-value-mapping.
:param vocab: Key to value mapping.
:return: A mapping from values to keys.
"""
return {v: k for k, v in vocab.items()}
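
# Usage sketch with an illustrative mapping:
_example_vocab = {"<pad>": 0, "<unk>": 1, "hello": 2}
_example_inverse = reverse_vocab(_example_vocab)
# _example_inverse == {0: "<pad>", 1: "<unk>", 2: "hello"}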
|
python
|
{
"resource": ""
}
|
q26381
|
get_ordered_tokens_from_vocab
|
train
|
def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
"""
Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
:param vocab: Input vocabulary.
:return: List of tokens.
"""
return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
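
# Usage sketch: tokens come back ordered by their vocabulary id, not by insertion order.
_example_tokens = get_ordered_tokens_from_vocab({"b": 1, "a": 0, "c": 2})
# _example_tokens == ["a", "b", "c"]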
|
python
|
{
"resource": ""
}
|
q26382
|
make_inputs
|
train
|
def make_inputs(input_file: Optional[str],
translator: inference.Translator,
input_is_json: bool,
input_factors: Optional[List[str]] = None) -> Generator[inference.TranslatorInput, None, None]:
"""
Generates TranslatorInput instances from input. If input is None, reads from stdin. If num_input_factors > 1,
the function will look for factors attached to each token, separated by '|'.
    If input_file is not None, reads from that file. If num_source_factors > 1, the corresponding number of
    source factor filenames is required.
:param input_file: The source file (possibly None).
:param translator: Translator that will translate each line of input.
:param input_is_json: Whether the input is in json format.
:param input_factors: Source factor files.
:return: TranslatorInput objects.
"""
if input_file is None:
check_condition(input_factors is None, "Translating from STDIN, not expecting any factor files.")
for sentence_id, line in enumerate(sys.stdin, 1):
if input_is_json:
yield inference.make_input_from_json_string(sentence_id=sentence_id,
json_string=line,
translator=translator)
else:
yield inference.make_input_from_factored_string(sentence_id=sentence_id,
factored_string=line,
translator=translator)
else:
input_factors = [] if input_factors is None else input_factors
inputs = [input_file] + input_factors
if not input_is_json:
check_condition(translator.num_source_factors == len(inputs),
"Model(s) require %d factors, but %d given (through --input and --input-factors)." % (
translator.num_source_factors, len(inputs)))
with ExitStack() as exit_stack:
streams = [exit_stack.enter_context(data_io.smart_open(i)) for i in inputs]
for sentence_id, inputs in enumerate(zip(*streams), 1):
if input_is_json:
yield inference.make_input_from_json_string(sentence_id=sentence_id,
json_string=inputs[0],
translator=translator)
else:
yield inference.make_input_from_multiple_strings(sentence_id=sentence_id, strings=list(inputs))
|
python
|
{
"resource": ""
}
|
q26383
|
read_and_translate
|
train
|
def read_and_translate(translator: inference.Translator,
output_handler: OutputHandler,
chunk_size: Optional[int],
input_file: Optional[str] = None,
input_factors: Optional[List[str]] = None,
input_is_json: bool = False) -> None:
"""
Reads from either a file or stdin and translates each line, calling the output_handler with the result.
:param output_handler: Handler that will write output to a stream.
:param translator: Translator that will translate each line of input.
:param chunk_size: The size of the portion to read at a time from the input.
    :param input_file: Optional path to a file to be translated line by line; if None, stdin is used.
:param input_factors: Optional list of paths to files that contain source factors.
:param input_is_json: Whether the input is in json format.
"""
batch_size = translator.max_batch_size
if chunk_size is None:
if translator.max_batch_size == 1:
            # No batching, therefore there is no need to read segments in chunks.
chunk_size = C.CHUNK_SIZE_NO_BATCHING
else:
# Get a constant number of batches per call to Translator.translate.
chunk_size = C.CHUNK_SIZE_PER_BATCH_SEGMENT * translator.max_batch_size
else:
if chunk_size < translator.max_batch_size:
logger.warning("You specified a chunk size (%d) smaller than the max batch size (%d). This will lead to "
"a reduction in translation speed. Consider choosing a larger chunk size." % (chunk_size,
batch_size))
logger.info("Translating...")
total_time, total_lines = 0.0, 0
for chunk in grouper(make_inputs(input_file, translator, input_is_json, input_factors), size=chunk_size):
chunk_time = translate(output_handler, chunk, translator)
total_lines += len(chunk)
total_time += chunk_time
if total_lines != 0:
logger.info("Processed %d lines. Total time: %.4f, sec/sent: %.4f, sent/sec: %.4f",
total_lines, total_time, total_time / total_lines, total_lines / total_time)
else:
logger.info("Processed 0 lines.")
|
python
|
{
"resource": ""
}
|
q26384
|
translate
|
train
|
def translate(output_handler: OutputHandler,
trans_inputs: List[inference.TranslatorInput],
translator: inference.Translator) -> float:
"""
Translates each line from source_data, calling output handler after translating a batch.
:param output_handler: A handler that will be called once with the output of each translation.
    :param trans_inputs: An iterable list of translator inputs.
:param translator: The translator that will be used for each line of input.
:return: Total time taken.
"""
tic = time.time()
trans_outputs = translator.translate(trans_inputs)
total_time = time.time() - tic
batch_time = total_time / len(trans_inputs)
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
output_handler.handle(trans_input, trans_output, batch_time)
return total_time
|
python
|
{
"resource": ""
}
|
q26385
|
ConvolutionBlock.step
|
train
|
def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
# As we only run convolution over a single window that is exactly the size of the convolutional filter
# we can use FullyConnected instead of Convolution for efficiency reasons. Additionally we do not need to
# perform any masking.
num_hidden = self._pre_activation_num_hidden()
# (batch_size, num_hidden, kernel_width)
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
# (batch_size, num_hidden * kernel_width)
data = mx.sym.reshape(data, shape=(0, -3))
# (preact_num_hidden, num_hidden * kernel_width)
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data,
weight=weight,
bias=self.conv_bias,
num_hidden=num_hidden)
# (batch_size, num_hidden, 1)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv)
|
python
|
{
"resource": ""
}
|
q26386
|
benchmark
|
train
|
def benchmark(cores, args):
"""
    Run per-core translation benchmarks: each core translates the whole input file
    in its own thread. Returns after all translations are done.
    :param cores: Number of cores used for translation; each core launches one translation thread.
    :param args: Parsed command line arguments.
"""
model = args.module
fileInput = args.input_file
fileOutput = args.output_file
batchsize = args.batch_size
thread = []
for i in range(cores):
command = "taskset -c %d-%d python3 -m sockeye.translate -m %s -i %s -o %s --batch-size %d --output-type benchmark --use-cpu > /dev/null 2>&1 " % (i, i, model, fileInput, fileOutput, batchsize)
t = threading.Thread(target = task, args=(command,))
thread.append(t)
t.start()
for t in thread:
t.join()
|
python
|
{
"resource": ""
}
|
q26387
|
split_file
|
train
|
def split_file(splitNum, fileInput, lines):
"""
    Split fileInput into splitNum smaller files.
    For example, with splitNum 56, a 112-line file is split into 56 files of 2 lines each.
    :param splitNum: Number of files to split into.
    :param fileInput: File to be split.
    :param lines: Number of lines in fileInput.
    :return: List of temporary files holding the pieces.
"""
quot = lines // splitNum
rema = lines % splitNum
files = []
current_line = 0
for i in range(splitNum):
if i < rema:
read_line = quot + 1
else:
read_line = quot
temp = tempfile.NamedTemporaryFile()
os.system("head -n%d %s| tail -n%d > %s" % (current_line + read_line, fileInput, read_line, temp.name))
current_line += read_line
files.append(temp)
return files
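
# Usage sketch (Unix-only, since head/tail are invoked through os.system; the input
# path is hypothetical): splitting a 112-line file across 56 workers yields 56
# temporary files of 2 lines each. Keep references to the returned objects, because
# the temporary files are removed once they are garbage collected.
# _parts = split_file(56, "corpus.txt", 112)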
|
python
|
{
"resource": ""
}
|
q26388
|
_indent
|
train
|
def _indent(indent=0, quote='', indent_char=' '):
"""Indent util function, compute new indent_string"""
if indent > 0:
indent_string = ''.join((
str(quote),
(indent_char * (indent - len(quote)))
))
else:
indent_string = ''.join((
('\x08' * (-1 * (indent - len(quote)))),
str(quote))
)
if len(indent_string):
INDENT_STRINGS.append(indent_string)
|
python
|
{
"resource": ""
}
|
q26389
|
puts
|
train
|
def puts(s='', newline=True, stream=STDOUT):
"""Prints given string to stdout."""
max_width_ctx = _get_max_width_context()
if max_width_ctx:
cols, separator = max_width_ctx[-1]
s = max_width(s, cols, separator)
if newline:
s = tsplit(s, NEWLINES)
s = map(str, s)
indent = ''.join(INDENT_STRINGS)
s = (str('\n' + indent)).join(s)
_str = ''.join((
''.join(INDENT_STRINGS),
str(s),
'\n' if newline else ''
))
stream(_str)
|
python
|
{
"resource": ""
}
|
q26390
|
puts_err
|
train
|
def puts_err(s='', newline=True, stream=STDERR):
"""Prints given string to stderr."""
puts(s, newline, stream)
|
python
|
{
"resource": ""
}
|
q26391
|
console_width
|
train
|
def console_width(kwargs):
""""Determine console_width."""
if sys.platform.startswith('win'):
console_width = _find_windows_console_width()
else:
console_width = _find_unix_console_width()
_width = kwargs.get('width', None)
if _width:
console_width = _width
else:
if not console_width:
console_width = 80
return console_width
|
python
|
{
"resource": ""
}
|
q26392
|
AppDir._create
|
train
|
def _create(self):
"""Creates current AppDir at AppDir.path."""
self._raise_if_none()
if not self._exists:
mkdir_p(self.path)
self._exists = True
|
python
|
{
"resource": ""
}
|
q26393
|
AppDir.open
|
train
|
def open(self, filename, mode='r'):
"""Returns file object from given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
return open(fn, mode)
|
python
|
{
"resource": ""
}
|
q26394
|
AppDir.append
|
train
|
def append(self, filename, content, binary=False):
"""Appends given content to given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'ab'
else:
flags = 'a'
        with open(fn, flags) as f:
f.write(content)
return True
|
python
|
{
"resource": ""
}
|
q26395
|
AppDir.delete
|
train
|
def delete(self, filename=''):
"""Deletes given file or directory. If no filename is passed, current
directory is removed.
"""
self._raise_if_none()
fn = path_join(self.path, filename)
try:
if isfile(fn):
remove(fn)
else:
removedirs(fn)
except OSError as why:
if why.errno == errno.ENOENT:
pass
else:
raise why
|
python
|
{
"resource": ""
}
|
q26396
|
AppDir.read
|
train
|
def read(self, filename, binary=False):
"""Returns contents of given file with AppDir.
If file doesn't exist, returns None."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'br'
else:
flags = 'r'
try:
with open(fn, flags) as f:
return f.read()
except IOError:
return None
|
python
|
{
"resource": ""
}
|
q26397
|
AppDir.sub
|
train
|
def sub(self, path):
"""Returns AppDir instance for given subdirectory name."""
if is_collection(path):
            path = path_join(*path)
return AppDir(path_join(self.path, path))
|
python
|
{
"resource": ""
}
|
q26398
|
expand_path
|
train
|
def expand_path(path):
"""Expands directories and globs in given path."""
paths = []
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isdir(path):
for (dir, dirs, files) in os.walk(path):
for file in files:
paths.append(os.path.join(dir, file))
else:
paths.extend(glob(path))
return paths
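
# Usage sketch: a directory argument is walked recursively; anything else is treated
# as a glob after "~" and environment variables have been expanded.
_example_paths = expand_path("*.py")   # all Python files in the current directory
# expand_path("~/logs") would instead list every file under ~/logs recursively.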
|
python
|
{
"resource": ""
}
|
q26399
|
tsplit
|
train
|
def tsplit(string, delimiters):
"""Behaves str.split but supports tuples of delimiters."""
delimiters = tuple(delimiters)
if len(delimiters) < 1:
return [string,]
final_delimiter = delimiters[0]
for i in delimiters[1:]:
string = string.replace(i, final_delimiter)
return string.split(final_delimiter)
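
# Usage sketch: split on several delimiters at once by rewriting them all to the
# first delimiter before splitting.
_example_parts = tsplit("a,b;c|d", (",", ";", "|"))
# _example_parts == ["a", "b", "c", "d"]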
|
python
|
{
"resource": ""
}
|