Dataset columns: _id (string, length 2-7), title (string, length 1-88), partition (string, 3 classes), text (string, length 75-19.8k), language (string, 1 class), meta_information (dict).
q26200
_decode_and_evaluate
train
def _decode_and_evaluate(decoder: checkpoint_decoder.CheckpointDecoder, checkpoint: int, output_name: str, queue: multiprocessing.Queue): """ Decodes and evaluates using given checkpoint_decoder and puts result in the queue, indexed by the checkpoint. """ metrics = decoder.decode_and_evaluate(checkpoint, output_name) queue.put((checkpoint, metrics))
python
{ "resource": "" }
q26201
TrainingModel._generate_fixed_param_names
train
def _generate_fixed_param_names(self, param_names: List[str], strategy: str) -> List[str]: """ Generate a fixed parameter list given a list of all parameter names and a strategy. """ # Number of encoder/decoder layers in model. if isinstance(self.config.config_encoder, EmptyEncoderConfig): num_encoder_layers = 1 elif isinstance(self.config.config_encoder, RecurrentEncoderConfig): num_encoder_layers = self.config.config_encoder.rnn_config.num_layers else: num_encoder_layers = self.config.config_encoder.num_layers if isinstance(self.config.config_decoder, RecurrentDecoderConfig): num_decoder_layers = self.config.config_decoder.rnn_config.num_layers else: num_decoder_layers = self.config.config_decoder.num_layers def is_fixed(name: str) -> bool: if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_DECODER: # Any decoder layer. return not name.startswith(C.DECODER_PREFIX) if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTER_LAYERS: # First and last encoder and decoder layers for RNN, # Transformer, and CNN models. return not (name.startswith("{}{}l{}".format(C.BIDIRECTIONALRNN_PREFIX, C.FORWARD_PREFIX, 0)) or name.startswith("{}{}l{}".format(C.BIDIRECTIONALRNN_PREFIX, C.REVERSE_PREFIX, 0)) or name.startswith("{}l{}".format(C.STACKEDRNN_PREFIX, num_encoder_layers - 2)) or name.startswith("{}l{}".format(C.RNN_DECODER_PREFIX, 0)) or name.startswith("{}l{}".format(C.RNN_DECODER_PREFIX, num_decoder_layers - 1)) or name.startswith("{}{}".format(C.TRANSFORMER_ENCODER_PREFIX, 0)) or name.startswith("{}{}".format(C.TRANSFORMER_ENCODER_PREFIX, num_encoder_layers - 1)) or name.startswith("{}{}".format(C.TRANSFORMER_DECODER_PREFIX, 0)) or name.startswith("{}{}".format(C.TRANSFORMER_DECODER_PREFIX, num_decoder_layers - 1)) or name.startswith("{}{}".format(C.CNN_ENCODER_PREFIX, 0)) or name.startswith("{}{}".format(C.CNN_ENCODER_PREFIX, num_encoder_layers - 1)) or name.startswith("{}{}".format(C.CNN_DECODER_PREFIX, 0)) or name.startswith("{}{}".format(C.CNN_DECODER_PREFIX, num_decoder_layers - 1))) if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_EMBEDDINGS: # Any type of learned embedding. return not (name.startswith(C.SOURCE_EMBEDDING_PREFIX) or name.startswith(C.SOURCE_POSITIONAL_EMBEDDING_PREFIX) or name.startswith(C.TARGET_EMBEDDING_PREFIX) or name.startswith(C.TARGET_POSITIONAL_EMBEDDING_PREFIX) or name.startswith(C.SHARED_EMBEDDING_PREFIX)) if strategy == C.FIXED_PARAM_STRATEGY_ALL_EXCEPT_OUTPUT_PROJ: # Target output projection. return not name.startswith(C.DEFAULT_OUTPUT_LAYER_PREFIX) raise ValueError("Unknown fixed parameter strategy: %s" % strategy) return [name for name in param_names if is_fixed(name)]
python
{ "resource": "" }
q26202
TrainingModel.get_gradients
train
def get_gradients(self) -> Dict[str, mx.nd.NDArray]: """ Returns a mapping of parameter names to gradient arrays. Parameter names are prefixed with the device. """ # We may have None if not all parameters are optimized return {"dev_%d_%s" % (i, name): exe.grad_arrays[j] for i, exe in enumerate(self.executors) for j, name in enumerate(self.executor_group.arg_names) if name in self.executor_group.param_names and self.executors[0].grad_arrays[j] is not None}
python
{ "resource": "" }
q26203
TrainingModel.get_global_gradient_norm
train
def get_global_gradient_norm(self) -> float: """ Returns global gradient norm. """ # average norm across executors: exec_norms = [global_norm([arr for arr in exe.grad_arrays if arr is not None]) for exe in self.executors] norm_val = sum(exec_norms) / float(len(exec_norms)) norm_val *= self.optimizer.rescale_grad return norm_val
python
{ "resource": "" }
q26204
TrainingModel.rescale_gradients
train
def rescale_gradients(self, scale: float): """ Rescales gradient arrays of executors by scale. """ for exe in self.executors: for arr in exe.grad_arrays: if arr is None: continue arr *= scale
python
{ "resource": "" }
q26205
TrainingModel.prepare_batch
train
def prepare_batch(self, batch: mx.io.DataBatch): """ Pre-fetches the next mini-batch. :param batch: The mini-batch to prepare. """ self.module.prepare(batch)
python
{ "resource": "" }
q26206
TrainingModel.evaluate
train
def evaluate(self, eval_iter: data_io.BaseParallelSampleIter, eval_metric: mx.metric.EvalMetric): """ Resets and recomputes evaluation metric on given data iterator. """ eval_metric.reset() # reset so the metric reflects only this evaluation pass for eval_batch in eval_iter: self.module.forward(eval_batch, is_train=False) self.module.update_metric(eval_metric, eval_batch.label)
python
{ "resource": "" }
q26207
TrainingModel.optimizer
train
def optimizer(self) -> Union[mx.optimizer.Optimizer, SockeyeOptimizer]: """ Returns the optimizer of the underlying module. """ # TODO: Push update to MXNet to expose the optimizer (Module should have a get_optimizer method) return self.current_module._optimizer
python
{ "resource": "" }
q26208
TrainingModel.initialize_optimizer
train
def initialize_optimizer(self, config: OptimizerConfig): """ Initializes the optimizer of the underlying module with an optimizer config. """ self.module.init_optimizer(kvstore=config.kvstore, optimizer=config.name, optimizer_params=config.params, force_init=True)
python
{ "resource": "" }
q26209
TrainingModel.initialize_parameters
train
def initialize_parameters(self, initializer: mx.init.Initializer, allow_missing_params: bool): """ Initializes the parameters of the underlying module. :param initializer: Parameter initializer. :param allow_missing_params: Whether to allow missing parameters. """ self.module.init_params(initializer=initializer, arg_params=self.params, aux_params=self.aux_params, allow_missing=allow_missing_params, force_init=False)
python
{ "resource": "" }
q26210
TrainingModel.log_parameters
train
def log_parameters(self): """ Logs information about model parameters. """ arg_params, aux_params = self.module.get_params() total_parameters = 0 fixed_parameters = 0 learned_parameters = 0 info = [] # type: List[str] for name, array in sorted(arg_params.items()): info.append("%s: %s" % (name, array.shape)) num_parameters = reduce(lambda x, y: x * y, array.shape) total_parameters += num_parameters if name in self.module._fixed_param_names: fixed_parameters += num_parameters else: learned_parameters += num_parameters percent_fixed = 100 * (fixed_parameters / max(1, total_parameters)) percent_learned = 100 * (learned_parameters / max(1, total_parameters)) logger.info("Model parameters: %s", ", ".join(info)) logger.info("Fixed model parameters: %s", ", ".join(self.module._fixed_param_names)) logger.info("Fixing %d parameters (%0.2f%%)", fixed_parameters, percent_fixed) logger.info("Learning %d parameters (%0.2f%%)", learned_parameters, percent_learned) logger.info("Total # of parameters: %d", total_parameters)
python
{ "resource": "" }
q26211
TrainingModel.save_params_to_file
train
def save_params_to_file(self, fname: str): """ Synchronizes parameters across devices, saves the parameters to disk, and updates self.params and self.aux_params. :param fname: Filename to write parameters to. """ arg_params, aux_params = self.module.get_params() self.module.set_params(arg_params, aux_params) self.params = arg_params self.aux_params = aux_params super().save_params_to_file(fname)
python
{ "resource": "" }
q26212
TrainingModel.load_params_from_file
train
def load_params_from_file(self, fname: str, allow_missing_params: bool = False): """ Loads parameters from a file and sets the parameters of the underlying module and this model instance. :param fname: File name to load parameters from. :param allow_missing_params: If set, the given parameters are allowed to be a subset of the Module parameters. """ super().load_params_from_file(fname) # sets self.params & self.aux_params self.module.set_params(arg_params=self.params, aux_params=self.aux_params, allow_missing=allow_missing_params)
python
{ "resource": "" }
q26213
TrainingModel.install_monitor
train
def install_monitor(self, monitor_pattern: str, monitor_stat_func_name: str): """ Installs an MXNet monitor onto the underlying module. :param monitor_pattern: Pattern string. :param monitor_stat_func_name: Name of monitor statistics function. """ self._monitor = mx.monitor.Monitor(interval=C.MEASURE_SPEED_EVERY, stat_func=C.MONITOR_STAT_FUNCS.get(monitor_stat_func_name), pattern=monitor_pattern, sort=True) self.module.install_monitor(self._monitor) logger.info("Installed MXNet monitor; pattern='%s'; statistics_func='%s'", monitor_pattern, monitor_stat_func_name)
python
{ "resource": "" }
q26214
TrainState.save
train
def save(self, fname: str): """ Saves this training state to fname. """ with open(fname, "wb") as fp: pickle.dump(self, fp)
python
{ "resource": "" }
q26215
EarlyStoppingTrainer._step
train
def _step(self, model: TrainingModel, batch: mx.io.DataBatch, checkpoint_interval: int, metric_train: mx.metric.EvalMetric, metric_loss: Optional[mx.metric.EvalMetric] = None): """ Performs an update to model given a batch and updates metrics. """ if model.monitor is not None: model.monitor.tic() #################### # Forward & Backward #################### model.run_forward_backward(batch, metric_train) # If using an extended optimizer, provide extra state information about the current batch optimizer = model.optimizer if metric_loss is not None and isinstance(optimizer, SockeyeOptimizer): # Loss for this batch metric_loss.reset() metric_loss.update(batch.label, model.module.get_outputs()) [(_, m_val)] = metric_loss.get_name_value() batch_state = BatchState(metric_val=m_val) optimizer.pre_update_batch(batch_state) ######## # UPDATE ######## if self.update_interval == 1 or self.state.batches % self.update_interval == 0: # Gradient rescaling gradient_norm = None if self.state.updates > 0 and (self.state.updates + 1) % checkpoint_interval == 0: # compute values for logging to metrics (before rescaling...) gradient_norm = self.state.gradient_norm = model.get_global_gradient_norm() self.state.gradients = model.get_gradients() # note: C.GRADIENT_CLIPPING_TYPE_ABS is handled by the mxnet optimizer directly if self.optimizer_config.gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_NORM: if gradient_norm is None: gradient_norm = model.get_global_gradient_norm() # clip gradients if gradient_norm > self.optimizer_config.gradient_clipping_threshold: ratio = self.optimizer_config.gradient_clipping_threshold / gradient_norm model.rescale_gradients(ratio) model.update() if self.update_interval > 1: model.zero_gradients() self.state.updates += 1 if model.monitor is not None: results = model.monitor.toc() if results: for _, k, v in results: logger.info('Monitor: Batch [{:d}] {:s} {:s}'.format(self.state.updates, k, v))
python
{ "resource": "" }
q26216
EarlyStoppingTrainer._update_metrics
train
def _update_metrics(self, metric_train: mx.metric.EvalMetric, metric_val: mx.metric.EvalMetric): """ Updates metrics for current checkpoint. If a process manager is given, also collects previous decoding results and spawns a new decoding process. Writes all metrics to the metrics file and optionally logs to tensorboard. """ checkpoint_metrics = {"epoch": self.state.epoch, "learning-rate": self.model.optimizer.learning_rate, "gradient-norm": self.state.gradient_norm, "time-elapsed": time.time() - self.state.start_tic} gpu_memory_usage = utils.get_gpu_memory_usage(self.model.context) checkpoint_metrics['used-gpu-memory'] = sum(v[0] for v in gpu_memory_usage.values()) checkpoint_metrics['converged'] = self.state.converged checkpoint_metrics['diverged'] = self.state.diverged for name, value in metric_train.get_name_value(): checkpoint_metrics["%s-train" % name] = value for name, value in metric_val.get_name_value(): checkpoint_metrics["%s-val" % name] = value self.state.metrics.append(checkpoint_metrics) utils.write_metrics_file(self.state.metrics, self.metrics_fname) tf_metrics = checkpoint_metrics.copy() tf_metrics.update({"%s_grad" % n: v for n, v in self.state.gradients.items()}) tf_metrics.update(self.model.params) self.tflogger.log_metrics(metrics=tf_metrics, checkpoint=self.state.checkpoint)
python
{ "resource": "" }
q26217
EarlyStoppingTrainer._cleanup
train
def _cleanup(self, lr_decay_opt_states_reset: str, process_manager: Optional['DecoderProcessManager'] = None, keep_training_state = False): """ Cleans parameter files, training state directory and waits for remaining decoding processes. """ utils.cleanup_params_files(self.model.output_dir, self.max_params_files_to_keep, self.state.checkpoint, self.state.best_checkpoint, self.keep_initializations) if process_manager is not None: result = process_manager.collect_results() if result is not None: decoded_checkpoint, decoder_metrics = result self.state.metrics[decoded_checkpoint - 1].update(decoder_metrics) self.tflogger.log_metrics(decoder_metrics, decoded_checkpoint) utils.write_metrics_file(self.state.metrics, self.metrics_fname) self.state.save(os.path.join(self.training_state_dirname, C.TRAINING_STATE_NAME)) if not keep_training_state: final_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_DIRNAME) if os.path.exists(final_training_state_dirname): shutil.rmtree(final_training_state_dirname) if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST: best_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_BEST) if os.path.exists(best_opt_states_fname): os.remove(best_opt_states_fname) if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL: initial_opt_states_fname = os.path.join(self.model.output_dir, C.OPT_STATES_INITIAL) if os.path.exists(initial_opt_states_fname): os.remove(initial_opt_states_fname)
python
{ "resource": "" }
q26218
EarlyStoppingTrainer._adjust_learning_rate
train
def _adjust_learning_rate(self, has_improved: bool, lr_decay_param_reset: bool, lr_decay_opt_states_reset: str): """ Adjusts the optimizer learning rate if required. """ if self.optimizer_config.lr_scheduler is not None: if issubclass(type(self.optimizer_config.lr_scheduler), lr_scheduler.AdaptiveLearningRateScheduler): lr_adjusted = self.optimizer_config.lr_scheduler.new_evaluation_result(has_improved) # type: ignore else: lr_adjusted = False if lr_adjusted and not has_improved: if lr_decay_param_reset: logger.info("Loading parameters from last best checkpoint: %d", self.state.best_checkpoint) self.model.load_params_from_file(self.best_params_fname) if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL: logger.info("Loading initial optimizer states") self.model.load_optimizer_states(os.path.join(self.model.output_dir, C.OPT_STATES_INITIAL)) elif lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST: logger.info("Loading optimizer states from best checkpoint: %d", self.state.best_checkpoint) self.model.load_optimizer_states(os.path.join(self.model.output_dir, C.OPT_STATES_BEST))
python
{ "resource": "" }
q26219
EarlyStoppingTrainer._create_eval_metric
train
def _create_eval_metric(metric_name: str) -> mx.metric.EvalMetric: """ Creates an EvalMetric given a metric name. """ # output_names refers to the list of outputs this metric should use to update itself, e.g. the softmax output if metric_name == C.ACCURACY: return utils.Accuracy(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME]) elif metric_name == C.PERPLEXITY: return mx.metric.Perplexity(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME], name=C.PERPLEXITY) elif metric_name == C.LENRATIO_MSE: return loss.LengthRatioMSEMetric(name=C.LENRATIO_MSE, output_names=[C.LENRATIO_OUTPUT_NAME], label_names=[C.LENRATIO_LABEL_OUTPUT_NAME]) else: raise ValueError("unknown metric name")
python
{ "resource": "" }
q26220
EarlyStoppingTrainer._create_eval_metric_composite
train
def _create_eval_metric_composite(metric_names: List[str]) -> mx.metric.CompositeEvalMetric: """ Creates a composite EvalMetric given a list of metric names. """ metrics = [EarlyStoppingTrainer._create_eval_metric(metric_name) for metric_name in metric_names] return mx.metric.create(metrics)
python
{ "resource": "" }
q26221
EarlyStoppingTrainer._update_best_params_link
train
def _update_best_params_link(self): """ Updates the params.best link to the latest best parameter file. """ best_params_path = self.best_params_fname actual_best_params_fname = C.PARAMS_NAME % self.state.best_checkpoint if os.path.lexists(best_params_path): os.remove(best_params_path) os.symlink(actual_best_params_fname, best_params_path)
python
{ "resource": "" }
q26222
EarlyStoppingTrainer._check_args
train
def _check_args(self, metrics: List[str], early_stopping_metric: str, lr_decay_opt_states_reset: str, lr_decay_param_reset: bool, cp_decoder: Optional[checkpoint_decoder.CheckpointDecoder] = None): """ Helper function that checks various configuration compatibilities. """ utils.check_condition(len(metrics) > 0, "At least one metric must be provided.") for metric in metrics: utils.check_condition(metric in C.METRICS, "Unknown metric to track during training: %s" % metric) if 'dist' in self.optimizer_config.kvstore: # In distributed training the optimizer will run remotely. For eve we however need to pass information about # the loss, which is not possible anymore by means of accessing self.module._curr_module._optimizer. utils.check_condition(self.optimizer_config.name != C.OPTIMIZER_EVE, "Eve optimizer not supported with distributed training.") utils.check_condition( not issubclass(type(self.optimizer_config.lr_scheduler), lr_scheduler.AdaptiveLearningRateScheduler), "Adaptive learning rate schedulers not supported with a dist kvstore. " "Try a fixed schedule such as %s." % C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T) utils.check_condition(not lr_decay_param_reset, "Parameter reset when the learning rate decays not " "supported with distributed training.") utils.check_condition(lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_OFF, "Optimizer state reset when the learning rate decays " "not supported with distributed training.") utils.check_condition(self.optimizer_config.gradient_clipping_type in C.GRADIENT_CLIPPING_TYPES, "Unknown gradient clipping type %s" % self.optimizer_config.gradient_clipping_type) utils.check_condition(early_stopping_metric in C.METRICS, "Unsupported early-stopping metric: %s" % early_stopping_metric) if early_stopping_metric in C.METRICS_REQUIRING_DECODER: utils.check_condition(cp_decoder is not None, "%s requires CheckpointDecoder" % early_stopping_metric)
python
{ "resource": "" }
q26223
EarlyStoppingTrainer._save_params
train
def _save_params(self): """ Saves model parameters at current checkpoint and optionally cleans up older parameter files to save disk space. """ self.model.save_params_to_file(self.current_params_fname) utils.cleanup_params_files(self.model.output_dir, self.max_params_files_to_keep, self.state.checkpoint, self.state.best_checkpoint, self.keep_initializations)
python
{ "resource": "" }
q26224
EarlyStoppingTrainer._save_training_state
train
def _save_training_state(self, train_iter: data_io.BaseParallelSampleIter): """ Saves current training state. """ # Create temporary directory for storing the state of the optimization process training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DIRNAME) if not os.path.exists(training_state_dirname): os.mkdir(training_state_dirname) # (1) Parameters: link current file params_base_fname = C.PARAMS_NAME % self.state.checkpoint params_file = os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME) if os.path.exists(params_file): os.unlink(params_file) os.symlink(os.path.join("..", params_base_fname), params_file) # (2) Optimizer states opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATES_LAST) self.model.save_optimizer_states(opt_state_fname) # (3) Data iterator train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME)) # (4) Random generators # RNG states: python's random and np.random provide functions for # storing the state, mxnet does not, but inside our code mxnet's RNG is # not used AFAIK with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp: pickle.dump(random.getstate(), fp) pickle.dump(np.random.get_state(), fp) # (5) Training state self.state.save(os.path.join(training_state_dirname, C.TRAINING_STATE_NAME)) # (6) Learning rate scheduler with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME), "wb") as fp: pickle.dump(self.optimizer_config.lr_scheduler, fp) # First we rename the existing directory to minimize the risk of state # loss if the process is aborted during deletion (which will be slower # than directory renaming) delete_training_state_dirname = os.path.join(self.model.output_dir, C.TRAINING_STATE_TEMP_DELETENAME) if os.path.exists(self.training_state_dirname): os.rename(self.training_state_dirname, delete_training_state_dirname) os.rename(training_state_dirname, self.training_state_dirname) if os.path.exists(delete_training_state_dirname): shutil.rmtree(delete_training_state_dirname)
python
{ "resource": "" }
q26225
EarlyStoppingTrainer._load_training_state
train
def _load_training_state(self, train_iter: data_io.BaseParallelSampleIter): """ Loads the full training state from disk. :param train_iter: training data iterator. """ # (1) Parameters params_fname = os.path.join(self.training_state_dirname, C.TRAINING_STATE_PARAMS_NAME) self.model.load_params_from_file(params_fname) # (2) Optimizer states opt_state_fname = os.path.join(self.training_state_dirname, C.OPT_STATES_LAST) self.model.load_optimizer_states(opt_state_fname) # (3) Data Iterator train_iter.load_state(os.path.join(self.training_state_dirname, C.BUCKET_ITER_STATE_NAME)) # (4) Random generators # RNG states: python's random and np.random provide functions for # storing the state, mxnet does not, but inside our code mxnet's RNG is # not used AFAIK with open(os.path.join(self.training_state_dirname, C.RNG_STATE_NAME), "rb") as fp: random.setstate(pickle.load(fp)) np.random.set_state(pickle.load(fp)) # (5) Training state self.state = TrainState.load(os.path.join(self.training_state_dirname, C.TRAINING_STATE_NAME)) # (6) Learning rate scheduler with open(os.path.join(self.training_state_dirname, C.SCHEDULER_STATE_NAME), "rb") as fp: self.optimizer_config.set_lr_scheduler(pickle.load(fp)) # initialize optimizer again self._initialize_optimizer()
python
{ "resource": "" }
q26226
DecoderProcessManager.start_decoder
train
def start_decoder(self, checkpoint: int): """ Starts a new CheckpointDecoder process and returns. No other process may exist. :param checkpoint: The checkpoint to decode. """ assert self.decoder_process is None output_name = os.path.join(self.output_folder, C.DECODE_OUT_NAME % checkpoint) self.decoder_process = self.ctx.Process(target=_decode_and_evaluate, args=(self.decoder, checkpoint, output_name, self.decoder_metric_queue)) self.decoder_process.name = 'Decoder-%d' % checkpoint logger.info("Starting process: %s", self.decoder_process.name) self.decoder_process.start() self._results_pending = True
python
{ "resource": "" }
q26227
DecoderProcessManager.collect_results
train
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]: """ Returns the decoded checkpoint and the decoder metrics or None if the queue is empty. """ self.wait_to_finish() if self.decoder_metric_queue.empty(): if self._results_pending: self._any_process_died = True self._results_pending = False return None decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get() assert self.decoder_metric_queue.empty() self._results_pending = False logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics) return decoded_checkpoint, decoder_metrics
python
{ "resource": "" }
q26228
DecoderProcessManager.update_process_died_status
train
def update_process_died_status(self): """ Update the flag indicating whether any process exited and did not provide a result. """ # There is a result pending, the process is no longer alive, yet there is no result in the queue # This means the decoder process has not successfully produced metrics queue_should_hold_result = self._results_pending and self.decoder_process is not None and not self.decoder_process.is_alive() if queue_should_hold_result and self.decoder_metric_queue.empty(): self._any_process_died = True
python
{ "resource": "" }
q26229
average
train
def average(param_paths: Iterable[str]) -> Dict[str, mx.nd.NDArray]: """ Averages parameters from a list of .params file paths. :param param_paths: List of paths to parameter files. :return: Averaged parameter dictionary. """ all_arg_params = [] all_aux_params = [] for path in param_paths: logger.info("Loading parameters from '%s'", path) arg_params, aux_params = utils.load_params(path) all_arg_params.append(arg_params) all_aux_params.append(aux_params) logger.info("%d models loaded", len(all_arg_params)) utils.check_condition(all(all_arg_params[0].keys() == p.keys() for p in all_arg_params), "arg_param names do not match across models") utils.check_condition(all(all_aux_params[0].keys() == p.keys() for p in all_aux_params), "aux_param names do not match across models") avg_params = {} # average arg_params for k in all_arg_params[0]: arrays = [p[k] for p in all_arg_params] avg_params["arg:" + k] = utils.average_arrays(arrays) # average aux_params for k in all_aux_params[0]: arrays = [p[k] for p in all_aux_params] avg_params["aux:" + k] = utils.average_arrays(arrays) return avg_params
python
{ "resource": "" }
q26230
find_checkpoints
train
def find_checkpoints(model_path: str, size=4, strategy="best", metric: str = C.PERPLEXITY) -> List[str]: """ Finds N best points from .metrics file according to strategy. :param model_path: Path to model. :param size: Number of checkpoints to combine. :param strategy: Combination strategy. :param metric: Metric according to which checkpoints are selected. Corresponds to columns in model/metrics file. :return: List of paths corresponding to chosen checkpoints. """ maximize = C.METRIC_MAXIMIZE[metric] points = utils.get_validation_metric_points(model_path=model_path, metric=metric) # keep only points for which .param files exist param_path = os.path.join(model_path, C.PARAMS_NAME) points = [(value, checkpoint) for value, checkpoint in points if os.path.exists(param_path % checkpoint)] if strategy == "best": # N best scoring points top_n = _strategy_best(points, size, maximize) elif strategy == "last": # N sequential points ending with overall best top_n = _strategy_last(points, size, maximize) elif strategy == "lifespan": # Track lifespan of every "new best" point # Points dominated by a previous better point have lifespan 0 top_n = _strategy_lifespan(points, size, maximize) else: raise RuntimeError("Unknown strategy, options: best last lifespan") # Assemble paths for params files corresponding to chosen checkpoints # Last element in point is always the checkpoint id params_paths = [ os.path.join(model_path, C.PARAMS_NAME % point[-1]) for point in top_n ] # Report logger.info("Found: " + ", ".join(str(point) for point in top_n)) return params_paths
python
{ "resource": "" }
q26231
main
train
def main(): """ Commandline interface to average parameters. """ setup_main_logger(console=True, file_logging=False) params = argparse.ArgumentParser(description="Averages parameters from multiple models.") arguments.add_average_args(params) args = params.parse_args() average_parameters(args)
python
{ "resource": "" }
q26232
get_lr_scheduler
train
def get_lr_scheduler(scheduler_type: str, updates_per_checkpoint: int, learning_rate_half_life: int, learning_rate_reduce_factor: float, learning_rate_reduce_num_not_improved: int, learning_rate_schedule: Optional[List[Tuple[float, int]]] = None, learning_rate_warmup: Optional[int] = 0) -> Optional[LearningRateScheduler]: """ Returns a learning rate scheduler. :param scheduler_type: Scheduler type. :param updates_per_checkpoint: Number of batches between checkpoints. :param learning_rate_half_life: Half life of the learning rate in number of checkpoints. :param learning_rate_reduce_factor: Factor to reduce learning rate with. :param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is reduced. :param learning_rate_schedule: Optional fixed learning rate schedule. :param learning_rate_warmup: Number of batches that the learning rate is linearly increased. :raises: ValueError if unknown scheduler_type :return: Learning rate scheduler. """ check_condition(learning_rate_schedule is None or scheduler_type == C.LR_SCHEDULER_FIXED_STEP, "Learning rate schedule can only be used with '%s' learning rate scheduler." % C.LR_SCHEDULER_FIXED_STEP) if scheduler_type is None: return None if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T: return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup) elif scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_T: return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup) elif scheduler_type == C.LR_SCHEDULER_FIXED_STEP: check_condition(learning_rate_schedule is not None, "learning_rate_schedule needed for %s scheduler" % C.LR_SCHEDULER_FIXED_STEP) return LearningRateSchedulerFixedStep(learning_rate_schedule, updates_per_checkpoint) elif scheduler_type == C.LR_SCHEDULER_PLATEAU_REDUCE: check_condition(learning_rate_reduce_factor is not None, "learning_rate_reduce_factor needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE) check_condition(learning_rate_reduce_num_not_improved is not None, "learning_rate_reduce_num_not_improved needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE) if learning_rate_reduce_factor >= 1.0: logger.warning("Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0" % C.LR_SCHEDULER_PLATEAU_REDUCE) return None return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved, learning_rate_warmup) else: raise ValueError("Unknown learning rate scheduler type %s." % scheduler_type)
python
{ "resource": "" }
q26233
LearningRateScheduler._warmup
train
def _warmup(self, num_updates): """ Returns linearly increasing fraction of base_lr. """ assert self.base_lr is not None if not self.warmup: return self.base_lr fraction = (num_updates + 1) * self.base_lr / (self.warmup + 1) if num_updates > self.last_warmup_log and num_updates % self.log_warmup_every_t == 0: self.last_warmup_log = num_updates logger.info("Learning rate warmup: %3.0f%%", fraction / self.base_lr * 100.0) return fraction
python
{ "resource": "" }
q26234
LearningRateSchedulerFixedStep.parse_schedule_str
train
def parse_schedule_str(schedule_str: str) -> List[Tuple[float, int]]: """ Parse learning schedule string. :param schedule_str: String in form rate1:num_updates1[,rate2:num_updates2,...] :return: List of tuples (learning_rate, num_updates). """ schedule = list() for step in schedule_str.split(","): rate, num_updates = step.split(":") schedule.append((float(rate), int(num_updates))) return schedule
python
{ "resource": "" }
q26235
copy_mx_model_to
train
def copy_mx_model_to(model_path, model_epoch, output_folder): """ Copies an MXNet model (symbol and params files) to a new path. :param model_path: Model path prefix without -symbol.json and -%04d.params :param model_epoch: Epoch of the pretrained model :param output_folder: Output folder :return: New path prefix the files were copied to """ target_path = os.path.join(output_folder, os.path.basename(model_path)) logger.info("Copying image model from {} to {}".format(model_path, target_path)) suffix = ['-symbol.json', '-%04d.params' % (model_epoch,)] for s in suffix: copyfile(model_path + s, target_path + s) return target_path
python
{ "resource": "" }
q26236
crop_resize_image
train
def crop_resize_image(image: Image.Image, size) -> Image.Image: """ Center-crops the input image to a square and resizes it. :param image: Original image, a PIL Image object. :param size: Tuple of height and width to resize the image to. :return: Cropped and resized image, a PIL Image object. """ width, height = image.size if width > height: left = (width - height) / 2 right = width - left top = 0 bottom = height else: top = (height - width) / 2 bottom = height - top left = 0 right = width image = image.crop((left, top, right, bottom)) image = image.resize(size, Image.ANTIALIAS) return image
python
{ "resource": "" }
q26237
load_preprocess_images
train
def load_preprocess_images(image_paths: List[str], image_size: tuple) -> List[np.ndarray]: """ Load and pre-process the images specified with absolute paths. :param image_paths: List of images specified with paths. :param image_size: Tuple to resize the image to (Channels, Height, Width) :return: A list of loaded images (numpy arrays). """ image_size = image_size[1:] # we do not need the number of channels images = [] for image_path in image_paths: images.append(load_preprocess_image(image_path, image_size)) return images
python
{ "resource": "" }
q26238
load_features
train
def load_features(paths: List[str], expected_shape: Optional[tuple] = None) -> List[np.ndarray]: """ Load features specified with absolute paths. :param paths: List of files specified with paths. :param expected_shape: Optional expected shape. :return: A list of loaded images (numpy arrays). """ data = [] # type: List[np.ndarray] for path in paths: data.append(load_feature(path, expected_shape)) return data
python
{ "resource": "" }
q26239
save_features
train
def save_features(paths: List[str], datas: List[np.ndarray], compressed: bool = False) -> List: """ Save features specified with absolute paths. :param paths: List of files specified with paths. :param datas: List of numpy ndarrays to save into the respective files :param compressed: Use numpy compression :return: A list of file names. """ fnames = [] # type: List[str] for path, data in zip(paths, datas): fnames.append(save_feature(path, data, compressed)) return fnames
python
{ "resource": "" }
q26240
zero_pad_features
train
def zero_pad_features(features: List[np.ndarray], target_shape: tuple) -> List[np.ndarray]: """ Zero-pads numpy arrays to a target shape. :param features: List of numpy arrays. :param target_shape: Target shape of each numpy array in the list features. Note: target_shape must be at least as large as the largest shape in features. :return: A list of padded numpy arrays. """ pad_features = [] for feature in features: feature_shape = feature.shape if len(feature_shape) < len(target_shape): # add extra trailing dimensions for i in range(len(target_shape) - len(feature_shape)): feature = np.expand_dims(feature, axis=len(feature.shape)) feature_shape = feature.shape elif len(feature_shape) > len(target_shape): raise ValueError("Provided target shape must have at least as many dimensions as the original " "shape. (provided: {}, original {})".format(len(target_shape), len(feature_shape))) diff_shape = np.subtract(target_shape, feature_shape) # pylint: disable=assignment-from-no-return if np.any(diff_shape < 0): raise ValueError("Provided target values must be greater than or equal to the original " "values for each dimension. (provided: {}, original {})".format(target_shape, feature_shape)) # pad format: ((before_1, after_1), ... (before_N, after_N)) diff_shape = [[0, d] for d in diff_shape] # pylint: disable=not-an-iterable p = np.pad(feature, diff_shape, 'constant', constant_values=0) pad_features.append(p) return pad_features
python
{ "resource": "" }
q26241
get_initializer
train
def get_initializer(default_init_type: str, default_init_scale: float, default_init_xavier_rand_type: str, default_init_xavier_factor_type: str, embed_init_type: str, embed_init_sigma: float, rnn_init_type: str, extra_initializers: Optional[List[Tuple[str, mx.initializer.Initializer]]] = None) -> mx.initializer.Initializer: """ Returns a mixed MXNet initializer. :param default_init_type: The default weight initializer type. :param default_init_scale: The scale used for default weight initialization (only used with uniform initialization). :param default_init_xavier_rand_type: Xavier random number generator type. :param default_init_xavier_factor_type: Xavier factor type. :param embed_init_type: Embedding matrix initialization type. :param embed_init_sigma: Sigma for normal initialization of embedding matrix. :param rnn_init_type: Initialization type for RNN h2h matrices. :param extra_initializers: Optional initializers provided from other sources. :return: Mixed initializer. """ # default initializer if default_init_type == C.INIT_XAVIER: default_init = [(C.DEFAULT_INIT_PATTERN, mx.init.Xavier(rnd_type=default_init_xavier_rand_type, factor_type=default_init_xavier_factor_type, magnitude=default_init_scale))] elif default_init_type == C.INIT_UNIFORM: default_init = [(C.DEFAULT_INIT_PATTERN, mx.init.Uniform(scale=default_init_scale))] else: raise ValueError("Unknown default initializer %s." % default_init_type) # embedding initializer if embed_init_type == C.EMBED_INIT_NORMAL: embed_init = [(C.EMBED_INIT_PATTERN, mx.init.Normal(sigma=embed_init_sigma))] elif embed_init_type == C.EMBED_INIT_DEFAULT: embed_init = [] else: raise ValueError('Unknown embedding initializer: %s' % embed_init_type) # rnn initializer if rnn_init_type == C.RNN_INIT_ORTHOGONAL: rnn_init = [(C.RNN_INIT_PATTERN, mx.initializer.Orthogonal())] elif rnn_init_type == C.RNN_INIT_ORTHOGONAL_STACKED: rnn_init = [(C.RNN_INIT_PATTERN, StackedOrthogonalInit(scale=1.0, rand_type="eye"))] elif rnn_init_type == C.RNN_INIT_DEFAULT: rnn_init = [] else: raise ValueError('Unknown RNN initializer: %s' % rnn_init_type) params_init_pairs = embed_init + rnn_init + default_init if extra_initializers is not None: params_init_pairs = extra_initializers + params_init_pairs return mx.initializer.Mixed(*zip(*params_init_pairs))
python
{ "resource": "" }
q26242
regular_folder
train
def regular_folder() -> Callable: """ Returns a method that can be used in argument parsing to check the argument is a directory. :return: A method that can be used as a type in argparse. """ def check_regular_directory(value_to_check): value_to_check = str(value_to_check) if not os.path.isdir(value_to_check): raise argparse.ArgumentTypeError("must be a directory.") return value_to_check return check_regular_directory
python
{ "resource": "" }
q26243
int_greater_or_equal
train
def int_greater_or_equal(threshold: int) -> Callable: """ Returns a method that can be used in argument parsing to check that the int argument is greater or equal to `threshold`. :param threshold: The threshold that we assume the cli argument value is greater or equal to. :return: A method that can be used as a type in argparse. """ def check_greater_equal(value: str): value_to_check = int(value) if value_to_check < threshold: raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold) return value_to_check return check_greater_equal
python
{ "resource": "" }
q26244
float_greater_or_equal
train
def float_greater_or_equal(threshold: float) -> Callable: """ Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`. :param threshold: The threshold that we assume the cli argument value is greater or equal to. :return: A method that can be used as a type in argparse. """ def check_greater_equal(value: str): value_to_check = float(value) if value_to_check < threshold: raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold) return value_to_check return check_greater_equal
python
{ "resource": "" }
q26245
learning_schedule
train
def learning_schedule() -> Callable: """ Returns a method that can be used in argument parsing to check that the argument is a valid learning rate schedule string. :return: A method that can be used as a type in argparse. """ def parse(schedule_str): try: schedule = LearningRateSchedulerFixedStep.parse_schedule_str(schedule_str) except ValueError: raise argparse.ArgumentTypeError( "Learning rate schedule string should have form rate1:num_updates1[,rate2:num_updates2,...]") return schedule return parse
python
{ "resource": "" }
q26246
simple_dict
train
def simple_dict() -> Callable: """ A simple dictionary format that does not require spaces or quoting. Supported types: bool, int, float :return: A method that can be used as a type in argparse. """ def parse(dict_str: str): def _parse(value: str): if value == "True": return True if value == "False": return False if "." in value: return float(value) return int(value) _dict = dict() try: for entry in dict_str.split(","): key, value = entry.split(":") _dict[key] = _parse(value) except ValueError: raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..." " Supported types: bool, int, float.") return _dict return parse
python
{ "resource": "" }
q26247
file_or_stdin
train
def file_or_stdin() -> Callable: """ Returns a file descriptor from stdin or opening a file from a given path. """ def parse(path): if path is None or path == "-": return sys.stdin else: return data_io.smart_open(path) return parse
python
{ "resource": "" }
q26248
_extract
train
def _extract(param_names: List[str], params: Dict[str, mx.nd.NDArray], ext_params: Dict[str, np.ndarray]) -> List[str]: """ Extract specific parameters from a given base. :param param_names: Names of parameters to be extracted. :param params: Mapping from parameter names to the actual NDArrays parameters. :param ext_params: Extracted parameter dictionary. :return: Remaining names of parameters to be extracted. """ remaining_param_names = list(param_names) for name in param_names: if name in params: logger.info("\tFound '%s': shape=%s", name, str(params[name].shape)) ext_params[name] = params[name].asnumpy() remaining_param_names.remove(name) return remaining_param_names
python
{ "resource": "" }
q26249
extract
train
def extract(param_path: str, param_names: List[str], list_all: bool) -> Dict[str, np.ndarray]: """ Extract specific parameters given their names. :param param_path: Path to the parameter file. :param param_names: Names of parameters to be extracted. :param list_all: List names of all available parameters. :return: Extracted parameter dictionary. """ logger.info("Loading parameters from '%s'", param_path) arg_params, aux_params = utils.load_params(param_path) ext_params = {} # type: Dict[str, np.ndarray] param_names = _extract(param_names, arg_params, ext_params) param_names = _extract(param_names, aux_params, ext_params) if len(param_names) > 0: logger.info("The following parameters were not found:") for name in param_names: logger.info("\t%s", name) logger.info("Listing all available parameters instead:") list_all = True if list_all: if arg_params: logger.info("Available arg parameters:") for name in arg_params: logger.info("\t%s: shape=%s", name, str(arg_params[name].shape)) if aux_params: logger.info("Available aux parameters:") for name in aux_params: logger.info("\t%s: shape=%s", name, str(aux_params[name].shape)) return ext_params
python
{ "resource": "" }
q26250
main
train
def main(): """ Commandline interface to extract parameters. """ setup_main_logger(console=True, file_logging=False) params = argparse.ArgumentParser(description="Extract specific parameters.") arguments.add_extract_args(params) args = params.parse_args() extract_parameters(args)
python
{ "resource": "" }
q26251
Decoder.register
train
def register(cls, config_type: Type[DecoderConfig], suffix: str): """ Registers decoder type for configuration. Suffix is appended to decoder prefix. :param config_type: Configuration type for decoder. :param suffix: String to append to decoder prefix. :return: Class decorator. """ def wrapper(target_cls): cls.__registry[config_type] = (target_cls, suffix) return target_cls return wrapper
python
{ "resource": "" }
q26252
Decoder.get_decoder
train
def get_decoder(cls, config: DecoderConfig, prefix: str) -> 'Decoder': """ Creates decoder based on config type. :param config: Decoder config. :param prefix: Prefix to prepend for decoder. :return: Decoder instance. """ config_type = type(config) if config_type not in cls.__registry: raise ValueError('Unsupported decoder configuration %s' % config_type.__name__) decoder_cls, suffix = cls.__registry[config_type] # TODO: move final suffix/prefix construction logic into config builder return decoder_cls(config=config, prefix=prefix + suffix)
python
{ "resource": "" }
q26253
Decoder.decode_sequence
train
def decode_sequence(self, source_encoded: mx.sym.Symbol, source_encoded_lengths: mx.sym.Symbol, source_encoded_max_length: int, target_embed: mx.sym.Symbol, target_embed_lengths: mx.sym.Symbol, target_embed_max_length: int) -> mx.sym.Symbol: """ Decodes a sequence of embedded target words and returns sequence of last decoder representations for each time step. :param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth). :param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,). :param source_encoded_max_length: Size of encoder time dimension. :param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed). :param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,). :param target_embed_max_length: Dimension of the embedded target sequence. :return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth). """ pass
python
{ "resource": "" }
q26254
Decoder.decode_step
train
def decode_step(self, step: int, target_embed_prev: mx.sym.Symbol, source_encoded_max_length: int, *states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]: """ Decodes a single time step given the current step, the previous embedded target word, and previous decoder states. Returns decoder representation for the next prediction, attention probabilities, and next decoder states. Implementations can maintain an arbitrary number of states. :param step: Global step of inference procedure, starts with 1. :param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed). :param source_encoded_max_length: Length of encoded source time dimension. :param states: Arbitrary list of decoder states. :return: logit inputs, attention probabilities, next decoder states. """ pass
python
{ "resource": "" }
q26255
Decoder.init_states
train
def init_states(self, source_encoded: mx.sym.Symbol, source_encoded_lengths: mx.sym.Symbol, source_encoded_max_length: int) -> List[mx.sym.Symbol]: """ Returns a list of symbolic states that represent the initial states of this decoder. Used for inference. :param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth). :param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,). :param source_encoded_max_length: Size of encoder time dimension. :return: List of symbolic initial states. """ pass
python
{ "resource": "" }
q26256
Decoder.state_shapes
train
def state_shapes(self, batch_size: int, target_max_length: int, source_encoded_max_length: int, source_encoded_depth: int) -> List[mx.io.DataDesc]: """ Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth. Used for inference. :param batch_size: Batch size during inference. :param target_max_length: Current target sequence length. :param source_encoded_max_length: Size of encoder time dimension. :param source_encoded_depth: Depth of encoded source. :return: List of shape descriptions. """ pass
python
{ "resource": "" }
q26257
JSONOutputHandler.handle
train
def handle(self, t_input: inference.TranslatorInput, t_output: inference.TranslatorOutput, t_walltime: float = 0.): """ Outputs a JSON object of the fields in the `TranslatorOutput` object. """ d_ = t_output.json(self.align_threshold) self.stream.write("%s\n" % json.dumps(d_, sort_keys=True)) self.stream.flush()
python
{ "resource": "" }
q26258
_add_graph_level
train
def _add_graph_level(graph, level, parent_ids, names, scores, normalized_scores, include_pad): """Adds a level to the passed graph""" for i, parent_id in enumerate(parent_ids): if not include_pad and names[i] == PAD_TOKEN: continue new_node = (level, i) parent_node = (level - 1, parent_id) raw_score = '%.3f' % float(scores[i]) if scores[i] is not None else '-inf' norm_score = '%.3f' % float(normalized_scores[i]) if normalized_scores[i] is not None else '-inf' graph.add_node(new_node) graph.node[new_node]["name"] = names[i] graph.node[new_node]["score"] = "[RAW] {}".format(raw_score) graph.node[new_node]["norm_score"] = "[NORM] {}".format(norm_score) graph.node[new_node]["size"] = 100 # Add an edge to the parent graph.add_edge(parent_node, new_node)
python
{ "resource": "" }
q26259
get_data_iters_and_vocabs
train
def get_data_iters_and_vocabs(args: argparse.Namespace, model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter', List[vocab.Vocab], vocab.Vocab, model.ModelConfig]: """ Loads the data iterators and vocabularies. :param args: Arguments as returned by argparse. :param model_folder: Output folder. :return: The scoring data iterator as well as the source and target vocabularies. """ model_config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME)) if args.max_seq_len is None: max_seq_len_source = model_config.config_data.max_seq_len_source max_seq_len_target = model_config.config_data.max_seq_len_target else: max_seq_len_source, max_seq_len_target = args.max_seq_len batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids) # Load the existing vocabs created when starting the training run. source_vocabs = vocab.load_source_vocabs(model_folder) target_vocab = vocab.load_target_vocab(model_folder) sources = [args.source] + args.source_factors sources = [str(os.path.abspath(source)) for source in sources] score_iter = data_io.get_scoring_data_iters( sources=sources, target=os.path.abspath(args.target), source_vocabs=source_vocabs, target_vocab=target_vocab, batch_size=args.batch_size, batch_num_devices=batch_num_devices, max_seq_len_source=max_seq_len_source, max_seq_len_target=max_seq_len_target) return score_iter, source_vocabs, target_vocab, model_config
python
{ "resource": "" }
q26260
get_image_cnn_encoder
train
def get_image_cnn_encoder(config: ImageLoadedCnnEncoderConfig) -> 'Encoder': """ Creates an image encoder. :param config: Configuration for the image encoder. :return: Encoder instance. """ encoders = list() # type: List[Encoder] max_seq_len = config.encoded_seq_len if not config.no_global_descriptor: max_seq_len += 1 encoders.append(get_positional_embedding(config.positional_embedding_type, config.num_embed, max_seq_len=max_seq_len, fixed_pos_embed_scale_up_input=False, fixed_pos_embed_scale_down_positions=True, prefix=C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)) encoders.append(ImageLoadedCnnEncoder(config=config)) return EncoderSequence(encoders)
python
{ "resource": "" }
q26261
ImageLoadedCnnEncoder.get_initializers
train
def get_initializers(self) -> List[Tuple[str, mx.init.Initializer]]: """ Get the initializers of the network, considering the pretrained models. :return: List of tuples (string name, mxnet initializer) """ patterns_vals = [] # Load from args/auxs for k in self.args.keys(): patterns_vals.append((k, mx.init.Load({k: self.args[k]}))) for k in self.auxs.keys(): patterns_vals.append((k, mx.init.Load({k: self.auxs[k]}))) # Initialize for k in self.names: patterns_vals.append((k, mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3))) return patterns_vals
python
{ "resource": "" }
q26262
ImageLoadedCnnEncoder.get_fixed_param_names
train
def get_fixed_param_names(self) -> List[str]: """ Get the fixed params of the network. :return: List of strings, names of the layers """ args = set(self.args.keys()) | set(self.auxs.keys()) return list(args & set(self.sym.list_arguments()))
python
{ "resource": "" }
q26263
get_bucket
train
def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]: """ Given sequence length and a list of buckets, return corresponding bucket. :param seq_len: Sequence length. :param buckets: List of buckets. :return: Chosen bucket. """ bucket_idx = bisect.bisect_left(buckets, seq_len) if bucket_idx == len(buckets): return None return buckets[bucket_idx]
python
{ "resource": "" }
q26264
calculate_length_statistics
train
def calculate_length_statistics(source_iterables: Sequence[Iterable[Any]], target_iterable: Iterable[Any], max_seq_len_source: int, max_seq_len_target: int) -> 'LengthStatistics': """ Returns mean and standard deviation of target-to-source length ratios of parallel corpus. :param source_iterables: Source sequence readers. :param target_iterable: Target sequence reader. :param max_seq_len_source: Maximum source sequence length. :param max_seq_len_target: Maximum target sequence length. :return: The number of sentences as well as the mean and standard deviation of target to source length ratios. """ mean_and_variance = OnlineMeanAndVariance() for sources, target in parallel_iter(source_iterables, target_iterable): source_len = len(sources[0]) target_len = len(target) if source_len > max_seq_len_source or target_len > max_seq_len_target: continue length_ratio = target_len / source_len mean_and_variance.update(length_ratio) return LengthStatistics(mean_and_variance.count, mean_and_variance.mean, mean_and_variance.std)
python
{ "resource": "" }
q26265
are_none
train
def are_none(sequences: Sequence[Sized]) -> bool: """ Returns True if all sequences are None. """ if not sequences: return True return all(s is None for s in sequences)
python
{ "resource": "" }
q26266
are_token_parallel
train
def are_token_parallel(sequences: Sequence[Sized]) -> bool: """ Returns True if all sequences in the list have the same length. """ if not sequences or len(sequences) == 1: return True return all(len(s) == len(sequences[0]) for s in sequences)
python
{ "resource": "" }
q26267
get_num_shards
train
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int: """ Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards. """ return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
python
{ "resource": "" }
q26268
get_scoring_data_iters
train
def get_scoring_data_iters(sources: List[str], target: str, source_vocabs: List[vocab.Vocab], target_vocab: vocab.Vocab, batch_size: int, batch_num_devices: int, max_seq_len_source: int, max_seq_len_target: int) -> 'BaseParallelSampleIter': """ Returns a data iterator for scoring. The iterator loads data on demand, batch by batch, and does not skip any lines. Lines that are too long are truncated. :param sources: Path to source training data (with optional factor data paths). :param target: Path to target training data. :param source_vocabs: Source vocabulary and optional factor vocabularies. :param target_vocab: Target vocabulary. :param batch_size: Batch size. :param batch_num_devices: Number of devices batches will be parallelized across. :param max_seq_len_source: Maximum source sequence length. :param max_seq_len_target: Maximum target sequence length. :return: The scoring data iterator. """ logger.info("==============================") logger.info("Creating scoring data iterator") logger.info("==============================") # One bucket to hold them all, bucket = (max_seq_len_source, max_seq_len_target) # ...One loader to raise them, data_loader = RawParallelDatasetLoader(buckets=[bucket], eos_id=target_vocab[C.EOS_SYMBOL], pad_id=C.PAD_ID, skip_blanks=False) # ...one iterator to traverse them all, scoring_iter = BatchedRawParallelSampleIter(data_loader=data_loader, sources=sources, target=target, source_vocabs=source_vocabs, target_vocab=target_vocab, bucket=bucket, batch_size=batch_size, max_lens=(max_seq_len_source, max_seq_len_target), num_factors=len(sources)) # and with the model appraise them. return scoring_iter
python
{ "resource": "" }
q26269
describe_data_and_buckets
train
def describe_data_and_buckets(data_statistics: DataStatistics, bucket_batch_sizes: List[BucketBatchSize]): """ Describes statistics across buckets """ check_condition(len(bucket_batch_sizes) == len(data_statistics.buckets), "Number of bucket batch sizes (%d) does not match number of buckets in statistics (%d)." % (len(bucket_batch_sizes), len(data_statistics.buckets))) if data_statistics.length_ratio_stats_per_bucket: for bucket_batch_size, num_seq, (lr_mean, lr_std) in zip(bucket_batch_sizes, data_statistics.num_sents_per_bucket, data_statistics.length_ratio_stats_per_bucket): if num_seq > 0: logger.info("Bucket %s: %d samples in %d batches of %d, ~%.1f tokens/batch, " "trg/src length ratio: %.2f (+-%.2f)", bucket_batch_size.bucket, num_seq, math.ceil(num_seq / bucket_batch_size.batch_size), bucket_batch_size.batch_size, bucket_batch_size.average_words_per_batch, lr_mean, lr_std) else: # TODO: remove with next bump of C.PREPARED_DATA_VERSION for bucket_batch_size, num_seq in zip(bucket_batch_sizes, data_statistics.num_sents_per_bucket): if num_seq > 0: logger.info("Bucket %s: %d samples in %d batches of %d, ~%.1f tokens/batch, ", bucket_batch_size.bucket, num_seq, math.ceil(num_seq / bucket_batch_size.batch_size), bucket_batch_size.batch_size, bucket_batch_size.average_words_per_batch)
python
{ "resource": "" }
q26270
read_content
train
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
    """
    Yields a list of tokens for each line in path, up to a limit.

    :param path: Path to a file containing sentences.
    :param limit: How many lines to read from path.
    :return: Iterator over lists of words.
    """
    with smart_open(path) as indata:
        for i, line in enumerate(indata):
            if limit is not None and i == limit:
                break
            yield list(get_tokens(line))
python
{ "resource": "" }
q26271
tokens2ids
train
def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]:
    """
    Returns sequence of integer ids given a sequence of tokens and vocab.

    :param tokens: List of string tokens.
    :param vocab: Vocabulary (containing UNK symbol).
    :return: List of word ids.
    """
    return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens]
python
{ "resource": "" }
q26272
strids2ids
train
def strids2ids(tokens: Iterable[str]) -> List[int]:
    """
    Returns sequence of integer ids given a sequence of string ids.

    :param tokens: List of string ids.
    :return: List of word ids.
    """
    return list(map(int, tokens))
python
{ "resource": "" }
q26273
ids2strids
train
def ids2strids(ids: Iterable[int]) -> str:
    """
    Returns a string representation of a sequence of integers.

    :param ids: Sequence of integers.
    :return: String sequence.
    """
    return C.TOKEN_SEPARATOR.join(map(str, ids))
python
{ "resource": "" }
q26274
ids2tokens
train
def ids2tokens(token_ids: Iterable[int],
               vocab_inv: Dict[int, str],
               exclude_set: Set[int]) -> Iterator[str]:
    """
    Transforms a list of token IDs into a list of words, excluding any IDs in `exclude_set`.

    :param token_ids: The list of token IDs.
    :param vocab_inv: The inverse vocabulary.
    :param exclude_set: The set of token IDs to exclude.
    :return: Iterator over the remaining words.
    """
    tokens = (vocab_inv[token] for token in token_ids)
    return (tok for token_id, tok in zip(token_ids, tokens) if token_id not in exclude_set)
python
{ "resource": "" }
q26275
create_sequence_readers
train
def create_sequence_readers(sources: List[str], target: str,
                            vocab_sources: List[vocab.Vocab],
                            vocab_target: vocab.Vocab) -> Tuple[List[SequenceReader], SequenceReader]:
    """
    Create source readers with EOS and target readers with BOS.

    :param sources: The file names of source data and factors.
    :param target: The file name of the target data.
    :param vocab_sources: The source vocabularies.
    :param vocab_target: The target vocabulary.
    :return: The source sequence readers and the target reader.
    """
    source_sequence_readers = [SequenceReader(source, vocab, add_eos=True)
                               for source, vocab in zip(sources, vocab_sources)]
    target_sequence_reader = SequenceReader(target, vocab_target, add_bos=True)
    return source_sequence_readers, target_sequence_reader
python
{ "resource": "" }
q26276
get_default_bucket_key
train
def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]:
    """
    Returns the default bucket from a list of buckets, i.e. the largest bucket.

    :param buckets: List of buckets.
    :return: The largest bucket in the list.
    """
    return max(buckets)
python
{ "resource": "" }
q26277
get_permutations
train
def get_permutations(bucket_counts: List[int]) -> Tuple[List[mx.nd.NDArray], List[mx.nd.NDArray]]:
    """
    Returns the indices of a random permutation for each bucket and the corresponding inverse permutations that can
    restore the original order of the data if applied to the permuted data.

    :param bucket_counts: The number of elements per bucket.
    :return: For each bucket a permutation and inverse permutation is returned.
    """
    data_permutations = []  # type: List[mx.nd.NDArray]
    inverse_data_permutations = []  # type: List[mx.nd.NDArray]
    for num_samples in bucket_counts:
        if num_samples == 0:
            num_samples = 1
        # new random order:
        data_permutation = np.random.permutation(num_samples)
        inverse_data_permutation = np.empty(num_samples, np.int32)
        inverse_data_permutation[data_permutation] = np.arange(num_samples)
        inverse_data_permutation = mx.nd.array(inverse_data_permutation)
        data_permutation = mx.nd.array(data_permutation)

        data_permutations.append(data_permutation)
        inverse_data_permutations.append(inverse_data_permutation)
    return data_permutations, inverse_data_permutations
python
{ "resource": "" }
q26278
get_batch_indices
train
def get_batch_indices(data: ParallelDataSet,
                      bucket_batch_sizes: List[BucketBatchSize]) -> List[Tuple[int, int]]:
    """
    Returns a list of index tuples that index into the bucket and the start index inside a bucket given the batch size
    for a bucket. These indices are valid for the given dataset.

    Put another way, this returns the starting points for all batches within the dataset, across all buckets.

    :param data: Data to create indices for.
    :param bucket_batch_sizes: Bucket batch sizes.
    :return: List of 2d indices.
    """
    # create index tuples (i, j) into buckets: i := bucket index ; j := row index of bucket array
    idxs = []  # type: List[Tuple[int, int]]
    for buck_idx, buck in enumerate(data.source):
        bucket = bucket_batch_sizes[buck_idx].bucket
        batch_size = bucket_batch_sizes[buck_idx].batch_size
        num_samples = data.source[buck_idx].shape[0]
        rest = num_samples % batch_size
        if rest > 0:
            logger.info("Ignoring %d samples from bucket %s with %d samples due to incomplete batch",
                        rest, bucket, num_samples)
        idxs.extend([(buck_idx, j) for j in range(0, num_samples - batch_size + 1, batch_size)])
    return idxs
python
{ "resource": "" }
q26279
ParallelDataSet.save
train
def save(self, fname: str):
    """
    Saves the dataset to a binary .npy file.
    """
    mx.nd.save(fname, self.source + self.target + self.label)
python
{ "resource": "" }
q26280
ParallelDataSet.load
train
def load(fname: str) -> 'ParallelDataSet':
    """
    Loads a dataset from a binary .npy file.
    """
    data = mx.nd.load(fname)
    n = len(data) // 3
    source = data[:n]
    target = data[n:2 * n]
    label = data[2 * n:]
    assert len(source) == len(target) == len(label)
    return ParallelDataSet(source, target, label)
python
{ "resource": "" }
q26281
ParallelDataSet.fill_up
train
def fill_up(self,
            bucket_batch_sizes: List[BucketBatchSize],
            seed: int = 42) -> 'ParallelDataSet':
    """
    Returns a new dataset with buckets filled up.

    :param bucket_batch_sizes: Bucket batch sizes.
    :param seed: The random seed used for sampling sentences to fill up.
    :return: New dataset with buckets filled up to the next multiple of batch size.
    """
    source = list(self.source)
    target = list(self.target)
    label = list(self.label)

    rs = np.random.RandomState(seed)

    for bucket_idx in range(len(self)):
        bucket = bucket_batch_sizes[bucket_idx].bucket
        bucket_batch_size = bucket_batch_sizes[bucket_idx].batch_size
        bucket_source = self.source[bucket_idx]
        bucket_target = self.target[bucket_idx]
        bucket_label = self.label[bucket_idx]
        num_samples = bucket_source.shape[0]

        # Fill up the last batch by randomly sampling from the extant items.
        if num_samples % bucket_batch_size != 0:
            rest = bucket_batch_size - num_samples % bucket_batch_size
            desired_indices_np = rs.randint(num_samples, size=rest)
            desired_indices = mx.nd.array(desired_indices_np)

            if isinstance(source[bucket_idx], np.ndarray):
                source[bucket_idx] = np.concatenate((bucket_source, bucket_source.take(desired_indices_np)), axis=0)
            else:
                source[bucket_idx] = mx.nd.concat(bucket_source, bucket_source.take(desired_indices), dim=0)
            target[bucket_idx] = mx.nd.concat(bucket_target, bucket_target.take(desired_indices), dim=0)
            label[bucket_idx] = mx.nd.concat(bucket_label, bucket_label.take(desired_indices), dim=0)

    return ParallelDataSet(source, target, label)
python
{ "resource": "" }
q26282
BatchedRawParallelSampleIter.iter_next
train
def iter_next(self) -> bool:
    """
    True if the iterator can return another batch.
    """
    # Read batch_size lines from the source stream
    sources_sentences = [[] for x in self.sources_sentences]  # type: List[List[str]]
    target_sentences = []  # type: List[str]
    num_read = 0
    for num_read, (sources, target) in enumerate(
            parallel_iterate(self.sources_iters, self.target_iter, skip_blanks=False), 1):
        source_len = 0 if sources[0] is None else len(sources[0])
        target_len = 0 if target is None else len(target)

        if source_len > self.max_len_source:
            logger.info("Trimming source sentence {} ({} -> {})".format(self.sentno + num_read,
                                                                        source_len, self.max_len_source))
            sources = [source[0:self.max_len_source] for source in sources]
        if target_len > self.max_len_target:
            logger.info("Trimming target sentence {} ({} -> {})".format(self.sentno + num_read,
                                                                        target_len, self.max_len_target))
            target = target[0:self.max_len_target]

        for i, source in enumerate(sources):
            sources_sentences[i].append(source)
        target_sentences.append(target)

        if num_read == self.batch_size:
            break

    self.sentno += num_read

    if num_read == 0:
        self.next_batch = None
        return False

    # The final batch may be underfilled, so mark it
    num_pad = self.batch_size - num_read

    dataset = self.data_loader.load(sources_sentences, target_sentences,
                                    [num_read]).fill_up(self.bucket_batch_sizes)

    data = [dataset.source[0], dataset.target[0]]
    label = dataset.label

    provide_data = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR)
                    for n, x in zip(self.data_names, data)]
    provide_label = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR)
                     for n, x in zip(self.label_names, label)]

    self.next_batch = mx.io.DataBatch(data, label,
                                      pad=num_pad, index=None, bucket_key=self.buckets[0],
                                      provide_data=provide_data, provide_label=provide_label)
    return True
python
{ "resource": "" }
q26283
BatchedRawParallelSampleIter.next
train
def next(self) -> mx.io.DataBatch:
    """
    Returns the next batch.
    """
    if self.iter_next():
        return self.next_batch
    raise StopIteration
python
{ "resource": "" }
q26284
ParallelSampleIter.reset
train
def reset(self):
    """
    Resets and reshuffles the data.
    """
    self.curr_batch_index = 0
    if self.permute:
        # shuffle batch start indices
        random.shuffle(self.batch_indices)

        # restore the data permutation
        self.data = self.data.permute(self.inverse_data_permutations)

        # permute the data within each batch
        self.data_permutations, self.inverse_data_permutations = get_permutations(self.data.get_bucket_counts())
        self.data = self.data.permute(self.data_permutations)
python
{ "resource": "" }
q26285
ParallelSampleIter.save_state
train
def save_state(self, fname: str):
    """
    Saves the current state of the iterator to a file, so that iteration can be continued. Note that the data is
    not saved, i.e. the iterator must be initialized with the same parameters as in the first call.

    :param fname: File name to save the information to.
    """
    with open(fname, "wb") as fp:
        pickle.dump(self.batch_indices, fp)
        pickle.dump(self.curr_batch_index, fp)
        np.save(fp, [a.asnumpy() for a in self.inverse_data_permutations])
        np.save(fp, [a.asnumpy() for a in self.data_permutations])
python
{ "resource": "" }
q26286
ParallelSampleIter.load_state
train
def load_state(self, fname: str):
    """
    Loads the state of the iterator from a file.

    :param fname: File name to load the information from.
    """
    # restore order
    self.data = self.data.permute(self.inverse_data_permutations)

    with open(fname, "rb") as fp:
        self.batch_indices = pickle.load(fp)
        self.curr_batch_index = pickle.load(fp)
        inverse_data_permutations = np.load(fp)
        data_permutations = np.load(fp)

    # Right after loading the iterator state, next() should be called
    self.curr_batch_index -= 1

    # load previous permutations
    self.inverse_data_permutations = []
    self.data_permutations = []

    for bucket in range(len(self.data)):
        inverse_permutation = mx.nd.array(inverse_data_permutations[bucket])
        self.inverse_data_permutations.append(inverse_permutation)

        permutation = mx.nd.array(data_permutations[bucket])
        self.data_permutations.append(permutation)

    self.data = self.data.permute(self.data_permutations)
python
{ "resource": "" }
q26287
create_checkpoint_decoder
train
def create_checkpoint_decoder(args: argparse.Namespace,
                              exit_stack: ExitStack,
                              train_context: List[mx.Context]) -> Optional[checkpoint_decoder.CheckpointDecoder]:
    """
    Returns a checkpoint decoder or None.

    :param args: Arguments as returned by argparse.
    :param exit_stack: An ExitStack from contextlib.
    :param train_context: Context for training.
    :return: A CheckpointDecoder if --decode-and-evaluate != 0, else None.
    """
    sample_size = args.decode_and_evaluate
    if args.optimized_metric == C.BLEU and sample_size == 0:
        logger.info("You chose BLEU as the optimized metric, will turn on BLEU monitoring during training. "
                    "To control how many validation sentences are used for calculating bleu use "
                    "the --decode-and-evaluate argument.")
        sample_size = -1

    if sample_size == 0:
        return None

    if args.use_cpu or args.decode_and_evaluate_use_cpu:
        context = mx.cpu()
    elif args.decode_and_evaluate_device_id is not None:
        context = utils.determine_context(device_ids=args.decode_and_evaluate_device_id,
                                          use_cpu=False,
                                          disable_device_locking=args.disable_device_locking,
                                          lock_dir=args.lock_dir,
                                          exit_stack=exit_stack)[0]
    else:
        # default decode context is the last training device
        context = train_context[-1]

    return checkpoint_decoder.CheckpointDecoderImageModel(context=context,
                                                          inputs=[args.validation_source] + args.validation_source_factors,
                                                          references=args.validation_target,
                                                          model=args.output,
                                                          sample_size=sample_size,
                                                          source_image_size=args.source_image_size,
                                                          image_root=args.validation_source_root,
                                                          max_output_length=args.max_output_length,
                                                          use_feature_loader=args.image_preextracted_features)
python
{ "resource": "" }
q26288
get_preinit_encoders
train
def get_preinit_encoders(encoders: List[encoder.Encoder]) -> List[Tuple[str, mx.init.Initializer]]:
    """
    Get initializers from encoders. Some encoders might be initialized from pretrained models.

    :param encoders: List of encoders.
    :return: The list of initializers.
    """
    init = []  # type: List[Tuple[str, mx.init.Initializer]]
    for enc in encoders:
        if hasattr(enc, "get_initializers"):
            enc = cast(encoder_image.ImageLoadedCnnEncoder, enc)
            init.extend(enc.get_initializers())
    return init
python
{ "resource": "" }
q26289
get_recurrent_encoder
train
def get_recurrent_encoder(config: RecurrentEncoderConfig, prefix: str) -> 'Encoder':
    """
    Returns an encoder stack with a bi-directional RNN, and a variable number of uni-directional forward RNNs.

    :param config: Configuration for recurrent encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    # TODO give more control on encoder architecture
    encoder_seq = EncoderSequence([], config.dtype)

    if config.conv_config is not None:
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
        if config.conv_config.add_positional_encoding:
            # If specified, add positional encodings to segment embeddings
            encoder_seq.append(AddSinCosPositionalEmbeddings,
                               num_embed=config.conv_config.num_embed,
                               scale_up_input=False,
                               scale_down_positions=False,
                               prefix="%s%sadd_positional_encodings" % (prefix, C.CHAR_SEQ_ENCODER_PREFIX))
        encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.TIME_MAJOR)
    else:
        encoder_seq.append(ConvertLayout, target_layout=C.TIME_MAJOR, num_hidden=0)

    if config.reverse_input:
        encoder_seq.append(ReverseSequence, infer_hidden=True)

    if config.rnn_config.residual:
        utils.check_condition(config.rnn_config.first_residual_layer >= 2,
                              "Residual connections on the first encoder layer are not supported")

    # One layer bi-directional RNN:
    encoder_seq.append(BiDirectionalRNNEncoder,
                       rnn_config=config.rnn_config.copy(num_layers=1),
                       prefix=prefix + C.BIDIRECTIONALRNN_PREFIX,
                       layout=C.TIME_MAJOR)

    if config.rnn_config.num_layers > 1:
        # Stacked uni-directional RNN:
        # Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
        remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
                                                      first_residual_layer=config.rnn_config.first_residual_layer - 1)
        encoder_seq.append(RecurrentEncoder,
                           rnn_config=remaining_rnn_config,
                           prefix=prefix + C.STACKEDRNN_PREFIX,
                           layout=C.TIME_MAJOR)

    encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.BATCH_MAJOR)

    return encoder_seq
python
{ "resource": "" }
q26290
get_convolutional_encoder
train
def get_convolutional_encoder(config: ConvolutionalEncoderConfig, prefix: str) -> 'Encoder':
    """
    Creates a convolutional encoder.

    :param config: Configuration for convolutional encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    encoder_seq = EncoderSequence([], dtype=config.dtype)
    cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,
                                                           config.num_embed,
                                                           max_seq_len=config.max_seq_len_source,
                                                           fixed_pos_embed_scale_up_input=False,
                                                           fixed_pos_embed_scale_down_positions=True,
                                                           prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
    encoder_seq.append(cls, **encoder_params)
    encoder_seq.append(ConvolutionalEncoder, config=config)
    return encoder_seq
python
{ "resource": "" }
q26291
get_transformer_encoder
train
def get_transformer_encoder(config: transformer.TransformerConfig, prefix: str) -> 'Encoder':
    """
    Returns a Transformer encoder, consisting of an embedding layer with positional encodings and a TransformerEncoder
    instance.

    :param config: Configuration for transformer encoder.
    :param prefix: Prefix for variable names.
    :return: Encoder instance.
    """
    encoder_seq = EncoderSequence([], dtype=config.dtype)
    cls, encoder_params = _get_positional_embedding_params(config.positional_embedding_type,
                                                           config.model_size,
                                                           config.max_seq_len_source,
                                                           fixed_pos_embed_scale_up_input=True,
                                                           fixed_pos_embed_scale_down_positions=False,
                                                           prefix=prefix + C.SOURCE_POSITIONAL_EMBEDDING_PREFIX)
    encoder_seq.append(cls, **encoder_params)
    if config.conv_config is not None:
        encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,
                           prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)
    encoder_seq.append(TransformerEncoder, config=config, prefix=prefix + C.TRANSFORMER_ENCODER_PREFIX)

    return encoder_seq
python
{ "resource": "" }
q26292
EncoderSequence.append
train
def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:
    """
    Extends the sequence with a new Encoder. 'dtype' gets passed into the Encoder instance if not already present
    in the parameters and supported by the specific Encoder type.

    :param cls: Encoder type.
    :param infer_hidden: Whether the number of hidden units should be inferred from the previous encoder.
    :param kwargs: Named arbitrary parameters for the Encoder.
    :return: Instance of Encoder.
    """
    params = dict(kwargs)
    if infer_hidden:
        params['num_hidden'] = self.get_num_hidden()

    sig_params = inspect.signature(cls.__init__).parameters
    if 'dtype' in sig_params and 'dtype' not in kwargs:
        params['dtype'] = self.dtype
    encoder = cls(**params)
    self.encoders.append(encoder)
    return encoder
python
{ "resource": "" }
q26293
BiDirectionalRNNEncoder._encode
train
def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:
    """
    Bidirectionally encodes time-major data.
    """
    # (seq_len, batch_size, num_embed)
    data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,
                                          use_sequence_length=True)
    # (seq_length, batch, cell_num_hidden)
    hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
    # (seq_length, batch, cell_num_hidden)
    hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)
    # (seq_length, batch, cell_num_hidden)
    hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,
                                            use_sequence_length=True)
    # (seq_length, batch, 2 * cell_num_hidden)
    hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name="%s_rnn" % self.prefix)

    return hidden_concat
python
{ "resource": "" }
q26294
BiDirectionalRNNEncoder.get_rnn_cells
train
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
    """
    Returns a list of RNNCells used by this encoder.
    """
    return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells()
python
{ "resource": "" }
q26295
ConvolutionalEncoder.encode
train
def encode(self,
           data: mx.sym.Symbol,
           data_length: mx.sym.Symbol,
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """
    Encodes data with a stack of Convolution+GLU blocks given sequence lengths of individual examples and maximum
    sequence length.

    :param data: Input data. Shape: (batch_size, seq_len, input_num_hidden).
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded version of the data.
    """
    # data: (batch_size, seq_len, num_hidden)
    data = mx.sym.FullyConnected(data=data,
                                 num_hidden=self.config.cnn_config.num_hidden,
                                 no_bias=True,
                                 flatten=False,
                                 weight=self.i2h_weight)

    # Multiple layers with residual connections:
    for layer in self.layers:
        data = data + layer(data, data_length, seq_len)
    return data, data_length, seq_len
python
{ "resource": "" }
q26296
_instantiate
train
def _instantiate(cls, params):
    """
    Helper to instantiate Attention classes from parameters.
    Logs a debug message if a parameter is not supported by the class constructor.

    :param cls: Attention class.
    :param params: Configuration parameters.
    :return: Instance of `cls` type.
    """
    sig_params = inspect.signature(cls.__init__).parameters
    valid_params = dict()
    for key, value in params.items():
        if key in sig_params:
            valid_params[key] = value
        else:
            logger.debug('Type %s does not support parameter \'%s\'' % (cls.__name__, key))
    return cls(**valid_params)
python
{ "resource": "" }
q26297
get_attention
train
def get_attention(config: AttentionConfig,
                  max_seq_len: int,
                  prefix: str = C.ATTENTION_PREFIX) -> 'Attention':
    """
    Returns an Attention instance based on attention_type.

    :param config: Attention configuration.
    :param max_seq_len: Maximum length of source sequences.
    :param prefix: Name prefix.
    :return: Instance of Attention.
    """
    att_cls = Attention.get_attention_cls(config.type)
    params = config.__dict__.copy()
    params.pop('_frozen')
    params['max_seq_len'] = max_seq_len
    params['prefix'] = prefix
    return _instantiate(att_cls, params)
python
{ "resource": "" }
q26298
get_context_and_attention_probs
train
def get_context_and_attention_probs(values: mx.sym.Symbol,
                                    length: mx.sym.Symbol,
                                    logits: mx.sym.Symbol,
                                    dtype: str) -> Tuple[mx.sym.Symbol, mx.sym.Symbol]:
    """
    Returns context vector and attention probabilities via a weighted sum over values.

    :param values: Shape: (batch_size, seq_len, encoder_num_hidden).
    :param length: Shape: (batch_size,).
    :param logits: Shape: (batch_size, seq_len, 1).
    :param dtype: Data type.
    :return: context: (batch_size, encoder_num_hidden), attention_probs: (batch_size, seq_len).
    """
    # masks attention scores according to sequence length.
    # (batch_size, seq_len, 1)
    logits = mx.sym.SequenceMask(data=logits,
                                 axis=1,
                                 use_sequence_length=True,
                                 sequence_length=length,
                                 value=-C.LARGE_VALUES[dtype])

    # (batch_size, seq_len, 1)
    probs = mx.sym.softmax(logits, axis=1, name='attention_softmax')

    # batch_dot: (batch, M, K) X (batch, K, N) -> (batch, M, N).
    # (batch_size, seq_len, num_hidden) X (batch_size, seq_len, 1) -> (batch_size, num_hidden, 1)
    context = mx.sym.batch_dot(lhs=values, rhs=probs, transpose_a=True)

    # (batch_size, encoder_num_hidden, 1) -> (batch_size, encoder_num_hidden)
    context = mx.sym.reshape(data=context, shape=(0, 0))
    probs = mx.sym.reshape(data=probs, shape=(0, 0))
    return context, probs
python
{ "resource": "" }
q26299
Attention.get_initial_state
train
def get_initial_state(self, source_length: mx.sym.Symbol, source_seq_len: int) -> AttentionState:
    """
    Returns initial attention state. Dynamic source encoding is initialized with zeros.

    :param source_length: Source length. Shape: (batch_size,).
    :param source_seq_len: Maximum length of source sequences.
    """
    dynamic_source = mx.sym.reshape(mx.sym.zeros_like(source_length), shape=(-1, 1, 1))
    # dynamic_source: (batch_size, source_seq_len, num_hidden_dynamic_source)
    dynamic_source = mx.sym.broadcast_to(dynamic_source,
                                         shape=(0, source_seq_len, self.dynamic_source_num_hidden))
    return AttentionState(context=None, probs=None, dynamic_source=dynamic_source)
python
{ "resource": "" }