Dataset schema (column : type, observed range):

  partition          string, 3 classes
  func_name          string, length 1 to 134
  docstring          string, length 1 to 46.9k
  path               string, length 4 to 223
  original_string    string, length 75 to 104k
  code               string, length 75 to 104k
  docstring_tokens   list, length 1 to 1.97k
  repo               string, length 7 to 55
  language           string, 1 class
  url                string, length 87 to 315
  code_tokens        list, length 19 to 28.4k
  sha                string, length 40 to 40
train
Module.save_checkpoint
Saves current progress to checkpoint. Use `mx.callback.module_checkpoint` as
`epoch_end_callback` to save during training.

Parameters
----------
prefix : str
    The file prefix to checkpoint to.
epoch : int
    The current epoch number.
save_optimizer_states : bool
    Whether to save optimizer states to continue training.
python/mxnet/module/module.py
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
    """Saves current progress to checkpoint.
    Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training.

    Parameters
    ----------
    prefix : str
        The file prefix to checkpoint to.
    epoch : int
        The current epoch number.
    save_optimizer_states : bool
        Whether to save optimizer states to continue training.
    """
    self._symbol.save('%s-symbol.json'%prefix)
    param_name = '%s-%04d.params' % (prefix, epoch)
    self.save_params(param_name)
    logging.info('Saved checkpoint to \"%s\"', param_name)
    if save_optimizer_states:
        state_name = '%s-%04d.states' % (prefix, epoch)
        self.save_optimizer_states(state_name)
        logging.info('Saved optimizer state to \"%s\"', state_name)
[ "Saves", "current", "progress", "to", "checkpoint", ".", "Use", "mx", ".", "callback", ".", "module_checkpoint", "as", "epoch_end_callback", "to", "save", "during", "training", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L165-L185
[ "def", "save_checkpoint", "(", "self", ",", "prefix", ",", "epoch", ",", "save_optimizer_states", "=", "False", ")", ":", "self", ".", "_symbol", ".", "save", "(", "'%s-symbol.json'", "%", "prefix", ")", "param_name", "=", "'%s-%04d.params'", "%", "(", "pref...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
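To make the record above concrete, here is a minimal, hypothetical sketch of driving `save_checkpoint` from user code. The network, shapes, and the `mymodel` prefix are illustrative assumptions, not part of the dataset row.

import mxnet as mx

# Hypothetical two-layer classifier; all names are illustrative.
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=64)
net = mx.sym.SoftmaxOutput(data=fc1, name='softmax')

mod = mx.mod.Module(symbol=net, context=mx.cpu())
mod.bind(data_shapes=[('data', (32, 100))],
         label_shapes=[('softmax_label', (32,))])
mod.init_params()
mod.init_optimizer()

# Writes mymodel-symbol.json, mymodel-0001.params and mymodel-0001.states.
mod.save_checkpoint('mymodel', epoch=1, save_optimizer_states=True)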
train
Module._reset_bind
Internal function to reset binded state.
python/mxnet/module/module.py
def _reset_bind(self):
    """Internal function to reset binded state."""
    self.binded = False
    self._exec_group = None
    self._data_shapes = None
    self._label_shapes = None
[ "Internal", "function", "to", "reset", "binded", "state", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L187-L192
[ "def", "_reset_bind", "(", "self", ")", ":", "self", ".", "binded", "=", "False", "self", ".", "_exec_group", "=", "None", "self", ".", "_data_shapes", "=", "None", "self", ".", "_label_shapes", "=", "None" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.get_params
Gets current parameters.

Returns
-------
`(arg_params, aux_params)`
    A pair of dictionaries each mapping parameter names to NDArray values.
python/mxnet/module/module.py
def get_params(self):
    """Gets current parameters.

    Returns
    -------
    `(arg_params, aux_params)`
        A pair of dictionaries each mapping parameter names to NDArray values.
    """
    assert self.binded and self.params_initialized

    if self._params_dirty:
        self._sync_params_from_devices()
    return (self._arg_params, self._aux_params)
[ "Gets", "current", "parameters", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L245-L257
[ "def", "get_params", "(", "self", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "if", "self", ".", "_params_dirty", ":", "self", ".", "_sync_params_from_devices", "(", ")", "return", "(", "self", ".", "_arg_params", ","...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
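Assuming `mod` is the bound and initialized Module from the earlier sketch, reading parameters back might look like:

arg_params, aux_params = mod.get_params()
print(sorted(arg_params))   # e.g. ['fc1_bias', 'fc1_weight']
print(aux_params)           # {} for this network (no BatchNorm etc.)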
train
Module.init_params
Initializes the parameters and auxiliary states.

Parameters
----------
initializer : Initializer
    Called to initialize parameters if needed.
arg_params : dict
    If not ``None``, should be a dictionary of existing arg_params. Initialization
    will be copied from that.
aux_params : dict
    If not ``None``, should be a dictionary of existing aux_params. Initialization
    will be copied from that.
allow_missing : bool
    If ``True``, params could contain missing values, and the initializer will be
    called to fill those missing params.
force_init : bool
    If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
    Whether to allow extra parameters that are not needed by the symbol.
    If this is True, no error will be thrown when arg_params or aux_params
    contain extra parameters that are not needed by the executor.
python/mxnet/module/module.py
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    """Initializes the parameters and auxiliary states.

    Parameters
    ----------
    initializer : Initializer
        Called to initialize parameters if needed.
    arg_params : dict
        If not ``None``, should be a dictionary of existing arg_params.
        Initialization will be copied from that.
    aux_params : dict
        If not ``None``, should be a dictionary of existing aux_params.
        Initialization will be copied from that.
    allow_missing : bool
        If ``True``, params could contain missing values, and the initializer will be
        called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that are not needed by the executor.
    """
    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "init_params call ignored.", stacklevel=2)
        return
    assert self.binded, 'call bind before initializing the parameters'

    def _impl(name, arr, cache):
        """Internal helper for parameter initialization"""
        if cache is not None:
            if name in cache:
                cache_arr = cache[name]
                # just in case the cached array is just the target itself
                if cache_arr is not arr:
                    cache_arr.copyto(arr)
            else:
                if not allow_missing:
                    raise RuntimeError("%s is not presented" % name)
                if initializer is not None:
                    initializer(name, arr)
        else:
            initializer(name, arr)

    attrs = self._symbol.attr_dict()
    for name, arr in sorted(self._arg_params.items()):
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, arg_params)

    for name, arr in sorted(self._aux_params.items()):
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, aux_params)

    self.params_initialized = True
    self._params_dirty = False

    # copy the initialized parameters to devices
    self._exec_group.set_params(self._arg_params, self._aux_params,
                                allow_extra=allow_extra)
[ "Initializes", "the", "parameters", "and", "auxiliary", "states", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L259-L320
[ "def", "init_params", "(", "self", ",", "initializer", "=", "Uniform", "(", "0.01", ")", ",", "arg_params", "=", "None", ",", "aux_params", "=", "None", ",", "allow_missing", "=", "False", ",", "force_init", "=", "False", ",", "allow_extra", "=", "False", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
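A short, hypothetical sketch of calling `init_params`, assuming `mod` has been bound as in the earlier example:

import mxnet as mx

mod.init_params(initializer=mx.init.Xavier(magnitude=2.0))

# A second call is ignored (with a warning) unless re-initialization is forced:
mod.init_params(initializer=mx.init.Uniform(0.01), force_init=True)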
train
Module.set_params
Assigns parameter and aux state values.

Parameters
----------
arg_params : dict
    Dictionary of name to `NDArray`.
aux_params : dict
    Dictionary of name to `NDArray`.
allow_missing : bool
    If ``True``, params could contain missing values, and the initializer will be
    called to fill those missing params.
force_init : bool
    If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
    Whether to allow extra parameters that are not needed by the symbol.
    If this is True, no error will be thrown when arg_params or aux_params
    contain extra parameters that are not needed by the executor.

Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
python/mxnet/module/module.py
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
               allow_extra=False):
    """Assigns parameter and aux state values.

    Parameters
    ----------
    arg_params : dict
        Dictionary of name to `NDArray`.
    aux_params : dict
        Dictionary of name to `NDArray`.
    allow_missing : bool
        If ``True``, params could contain missing values, and the initializer will be
        called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that are not needed by the executor.

    Examples
    --------
    >>> # An example of setting module parameters.
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
    >>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
    """
    if not allow_missing:
        self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
                         allow_missing=allow_missing, force_init=force_init,
                         allow_extra=allow_extra)
        return

    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "set_params call ignored.", stacklevel=2)
        return

    self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)

    # because we didn't update self._arg_params, they are dirty now.
    self._params_dirty = True
    self.params_initialized = True
[ "Assigns", "parameter", "and", "aux", "state", "values", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L322-L362
[ "def", "set_params", "(", "self", ",", "arg_params", ",", "aux_params", ",", "allow_missing", "=", "False", ",", "force_init", "=", "True", ",", "allow_extra", "=", "False", ")", ":", "if", "not", "allow_missing", ":", "self", ".", "init_params", "(", "ini...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.bind
Binds the symbols to construct executors. This is necessary before one can perform
computation with the module.

Parameters
----------
data_shapes : list of (str, tuple)
    Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
    Typically is ``data_iter.provide_label``.
for_training : bool
    Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
    Default is ``False``. Whether the gradients to the input data need to be computed.
    Typically this is not needed. But this might be needed when implementing composition
    of modules.
force_rebind : bool
    Default is ``False``. This function does nothing if the executors are already
    bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
    Default is ``None``. This is used in bucketing. When not ``None``, the shared module
    essentially corresponds to a different bucket -- a module with different symbol
    but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
python/mxnet/module/module.py
def bind(self, data_shapes, label_shapes=None, for_training=True,
         inputs_need_grad=False, force_rebind=False, shared_module=None,
         grad_req='write'):
    """Binds the symbols to construct executors. This is necessary before one
    can perform computation with the module.

    Parameters
    ----------
    data_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_data``.
    label_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_label``.
    for_training : bool
        Default is ``True``. Whether the executors should be bound for training.
    inputs_need_grad : bool
        Default is ``False``. Whether the gradients to the input data need to be computed.
        Typically this is not needed. But this might be needed when implementing composition
        of modules.
    force_rebind : bool
        Default is ``False``. This function does nothing if the executors are already
        bound. But with this ``True``, the executors will be forced to rebind.
    shared_module : Module
        Default is ``None``. This is used in bucketing. When not ``None``, the shared module
        essentially corresponds to a different bucket -- a module with different symbol
        but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
    """
    # force rebinding is typically used when one wants to switch from
    # training to prediction phase.
    if force_rebind:
        self._reset_bind()

    if self.binded:
        self.logger.warning('Already bound, ignoring bind()')
        return

    self.for_training = for_training
    self.inputs_need_grad = inputs_need_grad
    self._grad_req = grad_req

    if not for_training:
        assert not inputs_need_grad
    else:
        pass
        # this is not True, as some modules might not contain a loss function
        # that consumes the labels
        # assert label_shapes is not None

    self._data_shapes, self._label_shapes = _parse_data_desc(
        self.data_names, self.label_names, data_shapes, label_shapes)

    if shared_module is not None:
        assert isinstance(shared_module, Module) and \
                shared_module.binded and shared_module.params_initialized
        shared_group = shared_module._exec_group
        assert len(shared_group.execs) >= len(self._context)
    else:
        shared_group = None

    self._exec_group = DataParallelExecutorGroup(self._symbol, self._context,
                                                 self._work_load_list, self._data_shapes,
                                                 self._label_shapes, self._param_names,
                                                 for_training, inputs_need_grad,
                                                 shared_group, logger=self.logger,
                                                 fixed_param_names=self._fixed_param_names,
                                                 grad_req=grad_req, group2ctxs=self._group2ctxs,
                                                 state_names=self._state_names)
    self._total_exec_bytes = self._exec_group._total_exec_bytes
    if shared_module is not None:
        self.params_initialized = True
        self._arg_params = shared_module._arg_params
        self._aux_params = shared_module._aux_params
    elif self.params_initialized:
        # if the parameters are already initialized, we are re-binding
        # so automatically copy the already initialized params
        self._exec_group.set_params(self._arg_params, self._aux_params)
    else:
        assert self._arg_params is None and self._aux_params is None
        param_arrays = [
            zeros(shape=x[0].shape, dtype=x[0].dtype, stype=x[0].stype)
            for x in self._exec_group.param_arrays
        ]
        self._arg_params = {name: arr for name, arr in zip(self._param_names, param_arrays)}

        aux_arrays = [
            zeros(x[0].shape, dtype=x[0].dtype)
            for x in self._exec_group.aux_arrays
        ]
        self._aux_params = {name: arr for name, arr in zip(self._aux_names, aux_arrays)}

    if shared_module is not None and shared_module.optimizer_initialized:
        self.borrow_optimizer(shared_module)

    self.binded = True
[ "Binds", "the", "symbols", "to", "construct", "executors", ".", "This", "is", "necessary", "before", "one", "can", "perform", "computation", "with", "the", "module", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L364-L456
[ "def", "bind", "(", "self", ",", "data_shapes", ",", "label_shapes", "=", "None", ",", "for_training", "=", "True", ",", "inputs_need_grad", "=", "False", ",", "force_rebind", "=", "False", ",", "shared_module", "=", "None", ",", "grad_req", "=", "'write'", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
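A minimal sketch of calling `bind`, assuming an unbound `mod` as constructed in the first example; in practice the shapes usually come from `data_iter.provide_data` / `provide_label` rather than being spelled out:

mod.bind(data_shapes=[('data', (32, 100))],
         label_shapes=[('softmax_label', (32,))],
         for_training=True)

# For inference-only use, labels can be omitted:
# mod.bind(data_shapes=[('data', (32, 100))], for_training=False)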
train
Module.reshape
Reshapes the module for new input shapes.

Parameters
----------
data_shapes : list of (str, tuple)
    Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
    Typically is ``data_iter.provide_label``.
python/mxnet/module/module.py
def reshape(self, data_shapes, label_shapes=None):
    """Reshapes the module for new input shapes.

    Parameters
    ----------
    data_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_data``.
    label_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_label``.
    """
    assert self.binded
    self._data_shapes, self._label_shapes = _parse_data_desc(
        self.data_names, self.label_names, data_shapes, label_shapes)

    self._exec_group.reshape(self._data_shapes, self._label_shapes)
[ "Reshapes", "the", "module", "for", "new", "input", "shapes", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L458-L472
[ "def", "reshape", "(", "self", ",", "data_shapes", ",", "label_shapes", "=", "None", ")", ":", "assert", "self", ".", "binded", "self", ".", "_data_shapes", ",", "self", ".", "_label_shapes", "=", "_parse_data_desc", "(", "self", ".", "data_names", ",", "s...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
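For illustration, switching the already-bound `mod` from the sketch to a smaller batch without rebinding (only shapes may change, not the symbol):

mod.reshape(data_shapes=[('data', (8, 100))],
            label_shapes=[('softmax_label', (8,))])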
train
Module.init_optimizer
Installs and initializes optimizers.

Parameters
----------
kvstore : str or KVStore
    Default `'local'`.
optimizer : str or Optimizer
    Default `'sgd'`
optimizer_params : dict
    Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
    just to avoid pylint warning of dangerous default values.
force_init : bool
    Default ``False``, indicating whether we should force re-initializing the
    optimizer in the case an optimizer is already installed.
python/mxnet/module/module.py
def init_optimizer(self, kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.01),), force_init=False):
    """Installs and initializes optimizers.

    Parameters
    ----------
    kvstore : str or KVStore
        Default `'local'`.
    optimizer : str or Optimizer
        Default `'sgd'`
    optimizer_params : dict
        Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
        just to avoid pylint warning of dangerous default values.
    force_init : bool
        Default ``False``, indicating whether we should force re-initializing the
        optimizer in the case an optimizer is already installed.
    """
    assert self.binded and self.params_initialized

    if self.optimizer_initialized and not force_init:
        self.logger.warning('optimizer already initialized, ignoring...')
        return

    if self._params_dirty:
        self._sync_params_from_devices()

    (kvstore, update_on_kvstore) = \
        _create_kvstore(kvstore, len(self._context), self._arg_params)

    batch_size = self._exec_group.batch_size
    if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:
        batch_size *= kvstore.num_workers
    rescale_grad = 1.0/batch_size

    idx2name = {}
    if update_on_kvstore:
        idx2name.update(enumerate(self._exec_group.param_names))
    else:
        for k in range(len(self._context)):
            idx2name.update({i*len(self._context)+k: n
                             for i, n in enumerate(self._exec_group.param_names)})
    if isinstance(optimizer, str):
        optimizer_params = dict(optimizer_params)
        if 'rescale_grad' not in optimizer_params:
            optimizer_params['rescale_grad'] = rescale_grad
        optimizer = opt.create(optimizer,
                               sym=self.symbol, param_idx2name=idx2name,
                               **optimizer_params)
    else:
        assert isinstance(optimizer, opt.Optimizer)
        if optimizer.rescale_grad != rescale_grad:
            #pylint: disable=no-member
            warnings.warn(
                "Optimizer created manually outside Module but rescale_grad " +
                "is not normalized to 1.0/batch_size/num_workers (%s vs. %s). "%(
                    optimizer.rescale_grad, rescale_grad) +
                "Is this intended?", stacklevel=2)
        if not optimizer.idx2name:
            optimizer.idx2name = idx2name.copy()

    self._optimizer = optimizer
    self._kvstore = kvstore
    self._update_on_kvstore = update_on_kvstore
    self._updater = None

    if kvstore:
        if self._compression_params:
            kvstore.set_gradient_compression(self._compression_params)
        if update_on_kvstore:
            kvstore.set_optimizer(self._optimizer)
        # copy initialized local parameters to kvstore
        _initialize_kvstore(kvstore=kvstore,
                            param_arrays=self._exec_group.param_arrays,
                            arg_params=self._arg_params,
                            param_names=self._param_names,
                            update_on_kvstore=update_on_kvstore)
    if not update_on_kvstore:
        self._updater = opt.get_updater(optimizer)

    self.optimizer_initialized = True

    if self._preload_opt_states is not None:
        self.load_optimizer_states(self._preload_opt_states)
        self._preload_opt_states = None
[ "Installs", "and", "initializes", "optimizers", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L474-L558
[ "def", "init_optimizer", "(", "self", ",", "kvstore", "=", "'local'", ",", "optimizer", "=", "'sgd'", ",", "optimizer_params", "=", "(", "(", "'learning_rate'", ",", "0.01", ")", ",", ")", ",", "force_init", "=", "False", ")", ":", "assert", "self", ".",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
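A hypothetical call illustrating `init_optimizer` on the bound, initialized `mod` from earlier; the tuple-of-pairs form mirrors the documented default, and a plain dict works as well:

mod.init_optimizer(kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.1),
                                     ('momentum', 0.9),
                                     ('wd', 1e-4)))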
train
Module.borrow_optimizer
Borrows optimizer from a shared module. Used in bucketing, where exactly the same
optimizer (esp. kvstore) is used.

Parameters
----------
shared_module : Module
python/mxnet/module/module.py
def borrow_optimizer(self, shared_module):
    """Borrows optimizer from a shared module. Used in bucketing, where exactly the same
    optimizer (esp. kvstore) is used.

    Parameters
    ----------
    shared_module : Module
    """
    assert shared_module.optimizer_initialized
    self._optimizer = shared_module._optimizer
    self._kvstore = shared_module._kvstore
    self._update_on_kvstore = shared_module._update_on_kvstore
    self._updater = shared_module._updater
    self.optimizer_initialized = True
[ "Borrows", "optimizer", "from", "a", "shared", "module", ".", "Used", "in", "bucketing", "where", "exactly", "the", "same", "optimizer", "(", "esp", ".", "kvstore", ")", "is", "used", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L560-L573
[ "def", "borrow_optimizer", "(", "self", ",", "shared_module", ")", ":", "assert", "shared_module", ".", "optimizer_initialized", "self", ".", "_optimizer", "=", "shared_module", ".", "_optimizer", "self", ".", "_kvstore", "=", "shared_module", ".", "_kvstore", "se...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.forward
Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.

See Also
----------
:meth:`BaseModule.forward`.

Parameters
----------
data_batch : DataBatch
    Could be anything with similar API implemented.
is_train : bool
    Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
python/mxnet/module/module.py
def forward(self, data_batch, is_train=None):
    """Forward computation. It supports data batches with different shapes, such as
    different batch sizes or different image sizes.
    If reshaping of data batch relates to modification of symbol or module, such as
    changing image layout ordering or switching from training to predicting, module
    rebinding is required.

    See Also
    ----------
    :meth:`BaseModule.forward`.

    Parameters
    ----------
    data_batch : DataBatch
        Could be anything with similar API implemented.
    is_train : bool
        Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
    """
    assert self.binded and self.params_initialized

    curr_data_shapes = tuple(i.shape for i in self._data_shapes)
    if isinstance(data_batch, list):
        assert data_batch is not None, "Encountered empty data batch"
        new_data_shapes = []
        for i in range(len(data_batch[0].data)):
            shape = data_batch[0].data[i].shape
            for db in data_batch:
                assert shape == db.data[i].shape, \
                    "All data batches in a list need to have the same shape"
            new_batch_size = len(data_batch) * shape[0]
            new_data_shapes.append((new_batch_size,) + shape[1:])
        new_data_shapes = tuple(new_data_shapes)
    else:
        new_data_shapes = tuple(i.shape for i in data_batch.data)

    if curr_data_shapes != new_data_shapes:
        if hasattr(data_batch, "provide_data") and data_batch.provide_data:
            new_dshape = data_batch.provide_data
        else:
            new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \
                          for i, shape in zip(self._data_shapes, new_data_shapes)]

        if hasattr(data_batch, "provide_label") and data_batch.provide_label:
            new_lshape = data_batch.provide_label
        elif hasattr(data_batch, "label") and data_batch.label:
            new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \
                          for i, j in zip(self._label_shapes, data_batch.label)]
        else:
            new_lshape = None

        self.reshape(new_dshape, new_lshape)

    self._exec_group.forward(data_batch, is_train)
[ "Forward", "computation", ".", "It", "supports", "data", "batches", "with", "different", "shapes", "such", "as", "different", "batch", "sizes", "or", "different", "image", "sizes", ".", "If", "reshaping", "of", "data", "batch", "relates", "to", "modification", ...
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L575-L627
[ "def", "forward", "(", "self", ",", "data_batch", ",", "is_train", "=", "None", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "curr_data_shapes", "=", "tuple", "(", "i", ".", "shape", "for", "i", "in", "self", ".", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
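A small, hand-built example of feeding `forward`, assuming the bound `mod` from the earlier sketches; any object exposing `data`/`label` attributes with a similar API would also work:

import mxnet as mx

batch = mx.io.DataBatch(data=[mx.nd.ones((32, 100))],
                        label=[mx.nd.zeros((32,))])
mod.forward(batch, is_train=True)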
train
Module.backward
Backward computation.

See Also
----------
:meth:`BaseModule.backward`.

Parameters
----------
out_grads : NDArray or list of NDArray, optional
    Gradient on the outputs to be propagated back.
    This parameter is only needed when bind is called
    on outputs that are not a loss function.
python/mxnet/module/module.py
def backward(self, out_grads=None):
    """Backward computation.

    See Also
    ----------
    :meth:`BaseModule.backward`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back.
        This parameter is only needed when bind is called
        on outputs that are not a loss function.
    """
    assert self.binded and self.params_initialized
    self._exec_group.backward(out_grads=out_grads)
[ "Backward", "computation", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L629-L644
[ "def", "backward", "(", "self", ",", "out_grads", "=", "None", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "self", ".", "_exec_group", ".", "backward", "(", "out_grads", "=", "out_grads", ")" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.update
Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.

When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.

See Also
----------
:meth:`BaseModule.update`.
python/mxnet/module/module.py
def update(self):
    """Updates parameters according to the installed optimizer and the gradients computed
    in the previous forward-backward batch.

    When KVStore is used to update parameters for multi-device or multi-machine training,
    a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
    this function does update the copy of parameters in KVStore, but doesn't broadcast
    the updated parameters to all devices / machines. Please call `prepare` to broadcast
    `row_sparse` parameters with the next batch of data.

    See Also
    ----------
    :meth:`BaseModule.update`.
    """
    assert self.binded and self.params_initialized and self.optimizer_initialized

    self._params_dirty = True
    if self._update_on_kvstore:
        _update_params_on_kvstore(self._exec_group.param_arrays,
                                  self._exec_group.grad_arrays,
                                  self._kvstore, self._exec_group.param_names)
    else:
        _update_params(self._exec_group.param_arrays,
                       self._exec_group.grad_arrays,
                       updater=self._updater,
                       num_device=len(self._context),
                       kvstore=self._kvstore,
                       param_names=self._exec_group.param_names)
[ "Updates", "parameters", "according", "to", "the", "installed", "optimizer", "and", "the", "gradients", "computed", "in", "the", "previous", "forward", "-", "backward", "batch", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L646-L673
[ "def", "update", "(", "self", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "and", "self", ".", "optimizer_initialized", "self", ".", "_params_dirty", "=", "True", "if", "self", ".", "_update_on_kvstore", ":", "_update_pa...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
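Putting `forward`, `backward` and `update` together, a hand-rolled training loop over an assumed iterator `train_iter` (e.g. an `mx.io.NDArrayIter`) might look like the sketch below; `mod.fit` wraps essentially this loop:

train_iter.reset()
for batch in train_iter:
    mod.forward(batch, is_train=True)
    mod.backward()
    mod.update()    # device copies of params now differ from self._arg_params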
train
Module.get_outputs
Gets outputs of the previous forward computation.

If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise,
it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
might live on different devices.

Parameters
----------
merge_multi_context : bool
    Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like from a single
    executor.

Returns
-------
list of NDArray or list of list of NDArray
    Output.
python/mxnet/module/module.py
def get_outputs(self, merge_multi_context=True):
    """Gets outputs of the previous forward computation.

    If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise,
    it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
    elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
    might live on different devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is ``True``. In the case when data-parallelism is used, the outputs
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like from a single
        executor.

    Returns
    -------
    list of NDArray or list of list of NDArray
        Output.
    """
    assert self.binded and self.params_initialized
    return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
[ "Gets", "outputs", "of", "the", "previous", "forward", "computation", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L675-L697
[ "def", "get_outputs", "(", "self", ",", "merge_multi_context", "=", "True", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "return", "self", ".", "_exec_group", ".", "get_outputs", "(", "merge_multi_context", "=", "merge_mul...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
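Continuing the running sketch (the `mod` and `batch` assumed above), reading outputs after a forward pass:

mod.forward(batch, is_train=False)
out = mod.get_outputs()[0]    # merged across devices by default
print(out.shape)              # (32, 64) for the hypothetical network above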
train
Module.get_input_grads
Gets the gradients with respect to the inputs of the module.

If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise,
it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.

Parameters
----------
merge_multi_context : bool
    Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like from a single
    executor.

Returns
-------
list of NDArray or list of list of NDArray
    Input gradients
python/mxnet/module/module.py
def get_input_grads(self, merge_multi_context=True):
    """Gets the gradients with respect to the inputs of the module.

    If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise,
    it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
    elements are `NDArray`.

    Parameters
    ----------
    merge_multi_context : bool
        Default is ``True``. In the case when data-parallelism is used, the outputs
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like from a single
        executor.

    Returns
    -------
    list of NDArray or list of list of NDArray
        Input gradients
    """
    assert self.binded and self.params_initialized and self.inputs_need_grad
    return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
[ "Gets", "the", "gradients", "with", "respect", "to", "the", "inputs", "of", "the", "module", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L699-L720
[ "def", "get_input_grads", "(", "self", ",", "merge_multi_context", "=", "True", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "and", "self", ".", "inputs_need_grad", "return", "self", ".", "_exec_group", ".", "get_input_gra...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.get_states
Gets states from all devices.

If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.

Parameters
----------
merge_multi_context : bool
    Default is ``True``. In the case when data-parallelism is used, the states
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like from a single
    executor.

Returns
-------
list of NDArray or list of list of NDArray
    States
python/mxnet/module/module.py
def get_states(self, merge_multi_context=True):
    """Gets states from all devices.

    If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
    it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
    elements are `NDArray`.

    Parameters
    ----------
    merge_multi_context : bool
        Default is ``True``. In the case when data-parallelism is used, the states
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like from a single
        executor.

    Returns
    -------
    list of NDArray or list of list of NDArray
        States
    """
    assert self.binded and self.params_initialized
    return self._exec_group.get_states(merge_multi_context=merge_multi_context)
[ "Gets", "states", "from", "all", "devices", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L722-L743
[ "def", "get_states", "(", "self", ",", "merge_multi_context", "=", "True", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "return", "self", ".", "_exec_group", ".", "get_states", "(", "merge_multi_context", "=", "merge_multi...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.update_metric
Evaluates and accumulates evaluation metric on outputs of the last forward computation.

See Also
----------
:meth:`BaseModule.update_metric`.

Parameters
----------
eval_metric : EvalMetric
    Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
    list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
    Whether the labels are already sliced per device (default: False).
python/mxnet/module/module.py
def update_metric(self, eval_metric, labels, pre_sliced=False):
    """Evaluates and accumulates evaluation metric on outputs of the last forward computation.

    See Also
    ----------
    :meth:`BaseModule.update_metric`.

    Parameters
    ----------
    eval_metric : EvalMetric
        Evaluation metric to use.
    labels : list of NDArray if `pre_sliced` parameter is set to `False`,
        list of lists of NDArray otherwise. Typically `data_batch.label`.
    pre_sliced: bool
        Whether the labels are already sliced per device (default: False).
    """
    self._exec_group.update_metric(eval_metric, labels, pre_sliced)
[ "Evaluates", "and", "accumulates", "evaluation", "metric", "on", "outputs", "of", "the", "last", "forward", "computation", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L759-L775
[ "def", "update_metric", "(", "self", ",", "eval_metric", ",", "labels", ",", "pre_sliced", "=", "False", ")", ":", "self", ".", "_exec_group", ".", "update_metric", "(", "eval_metric", ",", "labels", ",", "pre_sliced", ")" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
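A short sketch of accumulating a metric, again assuming the `mod` and `batch` from the running example:

import mxnet as mx

metric = mx.metric.Accuracy()
metric.reset()
mod.forward(batch, is_train=False)
mod.update_metric(metric, batch.label)
print(metric.get())    # ('accuracy', <float>)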
train
Module._sync_params_from_devices
Synchronizes parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read
the latest parameters from ``self._arg_params`` and ``self._aux_params``.

For row_sparse parameters on devices, they are pulled from KVStore with all row ids.
python/mxnet/module/module.py
def _sync_params_from_devices(self):
    """Synchronizes parameters from devices to CPU. This function should be called after
    calling `update` that updates the parameters on the devices, before one can read the
    latest parameters from ``self._arg_params`` and ``self._aux_params``.

    For row_sparse parameters on devices, they are pulled from KVStore with all row ids.
    """
    self._exec_group.get_params(self._arg_params, self._aux_params)
    if self._kvstore and self._update_on_kvstore:
        for param_name, param_val in sorted(self._arg_params.items()):
            if param_val.stype == 'row_sparse':
                row_ids = nd.arange(0, param_val.shape[0], dtype='int64')
                self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids)
    self._params_dirty = False
[ "Synchronizes", "parameters", "from", "devices", "to", "CPU", ".", "This", "function", "should", "be", "called", "after", "calling", "update", "that", "updates", "the", "parameters", "on", "the", "devices", "before", "one", "can", "read", "the", "latest", "par...
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L777-L791
[ "def", "_sync_params_from_devices", "(", "self", ")", ":", "self", ".", "_exec_group", ".", "get_params", "(", "self", ".", "_arg_params", ",", "self", ".", "_aux_params", ")", "if", "self", ".", "_kvstore", "and", "self", ".", "_update_on_kvstore", ":", "fo...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Module.save_optimizer_states
Saves optimizer (updater) state to a file. Parameters ---------- fname : str Path to output states file.
python/mxnet/module/module.py
def save_optimizer_states(self, fname): """Saves optimizer (updater) state to a file. Parameters ---------- fname : str Path to output states file. """ assert self.optimizer_initialized if self._update_on_kvstore: self._kvstore.save_optimizer_states(fname) else: with open(fname, 'wb') as fout: fout.write(self._updater.get_states())
def save_optimizer_states(self, fname): """Saves optimizer (updater) state to a file. Parameters ---------- fname : str Path to output states file. """ assert self.optimizer_initialized if self._update_on_kvstore: self._kvstore.save_optimizer_states(fname) else: with open(fname, 'wb') as fout: fout.write(self._updater.get_states())
[ "Saves", "optimizer", "(", "updater", ")", "state", "to", "a", "file", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L793-L807
[ "def", "save_optimizer_states", "(", "self", ",", "fname", ")", ":", "assert", "self", ".", "optimizer_initialized", "if", "self", ".", "_update_on_kvstore", ":", "self", ".", "_kvstore", ".", "save_optimizer_states", "(", "fname", ")", "else", ":", "with", "o...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
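A one-line usage sketch for the record above; the filename follows the '%s-%04d.states' pattern used elsewhere in this file, but is an assumption here:

    mod.save_optimizer_states('model-0010.states')  # requires init_optimizer() first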
train
Module.load_optimizer_states
Loads optimizer (updater) state from a file. Parameters ---------- fname : str Path to input states file.
python/mxnet/module/module.py
def load_optimizer_states(self, fname): """Loads optimizer (updater) state from a file. Parameters ---------- fname : str Path to input states file. """ assert self.optimizer_initialized if self._update_on_kvstore: self._kvstore.load_optimizer_states(fname) else: self._updater.set_states(open(fname, 'rb').read())
def load_optimizer_states(self, fname): """Loads optimizer (updater) state from a file. Parameters ---------- fname : str Path to input states file. """ assert self.optimizer_initialized if self._update_on_kvstore: self._kvstore.load_optimizer_states(fname) else: self._updater.set_states(open(fname, 'rb').read())
[ "Loads", "optimizer", "(", "updater", ")", "state", "from", "a", "file", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L809-L822
[ "def", "load_optimizer_states", "(", "self", ",", "fname", ")", ":", "assert", "self", ".", "optimizer_initialized", "if", "self", ".", "_update_on_kvstore", ":", "self", ".", "_kvstore", ".", "load_optimizer_states", "(", "fname", ")", "else", ":", "self", "....
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
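A hedged sketch of resuming training with the record above; the optimizer choice and filename are illustrative:

    mod.init_optimizer(optimizer='sgd')             # must precede loading: the method
                                                    # asserts self.optimizer_initialized
    mod.load_optimizer_states('model-0010.states')  # illustrative filename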
train
Module.prepare
Prepares the module for processing a data batch. Usually involves switching bucket and reshaping. For modules that contain `row_sparse` parameters in KVStore, it prepares the `row_sparse` parameters based on the sparse_row_id_fn. When KVStore is used to update parameters for multi-device or multi-machine training, a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters, the `update()` updates the copy of parameters in KVStore, but doesn't broadcast the updated parameters to all devices / machines. The `prepare` function is used to broadcast `row_sparse` parameters with the next batch of data. Parameters ---------- data_batch : DataBatch The current batch of data for forward computation. sparse_row_id_fn : A callback function The function takes `data_batch` as an input and returns a dict of str -> NDArray. The resulting dict is used for pulling row_sparse parameters from the kvstore, where the str key is the name of the param, and the value is the row id of the param to pull.
python/mxnet/module/module.py
def prepare(self, data_batch, sparse_row_id_fn=None): '''Prepares the module for processing a data batch. Usually involves switching bucket and reshaping. For modules that contain `row_sparse` parameters in KVStore, it prepares the `row_sparse` parameters based on the sparse_row_id_fn. When KVStore is used to update parameters for multi-device or multi-machine training, a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters, the `update()` updates the copy of parameters in KVStore, but doesn't broadcast the updated parameters to all devices / machines. The `prepare` function is used to broadcast `row_sparse` parameters with the next batch of data. Parameters ---------- data_batch : DataBatch The current batch of data for forward computation. sparse_row_id_fn : A callback function The function takes `data_batch` as an input and returns a dict of str -> NDArray. The resulting dict is used for pulling row_sparse parameters from the kvstore, where the str key is the name of the param, and the value is the row id of the param to pull. ''' assert self.binded if sparse_row_id_fn is not None: if not self._kvstore or not self._update_on_kvstore: warnings.warn(UserWarning("Parameters are not updated in the KVStore. " "No need to call sparse_row_id_fn.")) else: row_ids = sparse_row_id_fn(data_batch) assert(isinstance(row_ids, dict)), "Expected dict output from sparse_row_id_fn" for param_name, row_id in row_ids.items(): param_idx = self._exec_group.param_names.index(param_name) param_val = self._exec_group.param_arrays[param_idx] assert(isinstance(param_val, (tuple, list))) if param_val[0].stype != 'row_sparse': warnings.warn(UserWarning("%s.stype is not 'row_sparse'. No need to " "perform row_sparse_pull." % param_name)) else: self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_id, priority=-param_idx)
def prepare(self, data_batch, sparse_row_id_fn=None): '''Prepares the module for processing a data batch. Usually involves switching bucket and reshaping. For modules that contain `row_sparse` parameters in KVStore, it prepares the `row_sparse` parameters based on the sparse_row_id_fn. When KVStore is used to update parameters for multi-device or multi-machine training, a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters, the `update()` updates the copy of parameters in KVStore, but doesn't broadcast the updated parameters to all devices / machines. The `prepare` function is used to broadcast `row_sparse` parameters with the next batch of data. Parameters ---------- data_batch : DataBatch The current batch of data for forward computation. sparse_row_id_fn : A callback function The function takes `data_batch` as an input and returns a dict of str -> NDArray. The resulting dict is used for pulling row_sparse parameters from the kvstore, where the str key is the name of the param, and the value is the row id of the param to pull. ''' assert self.binded if sparse_row_id_fn is not None: if not self._kvstore or not self._update_on_kvstore: warnings.warn(UserWarning("Parameters are not updated in the KVStore. " "No need to call sparse_row_id_fn.")) else: row_ids = sparse_row_id_fn(data_batch) assert(isinstance(row_ids, dict)), "Expected dict output from sparse_row_id_fn" for param_name, row_id in row_ids.items(): param_idx = self._exec_group.param_names.index(param_name) param_val = self._exec_group.param_arrays[param_idx] assert(isinstance(param_val, (tuple, list))) if param_val[0].stype != 'row_sparse': warnings.warn(UserWarning("%s.stype is not 'row_sparse'. No need to " "perform row_sparse_pull." % param_name)) else: self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_id, priority=-param_idx)
[ "Prepares", "the", "module", "for", "processing", "a", "data", "batch", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/module.py#L829-L870
[ "def", "prepare", "(", "self", ",", "data_batch", ",", "sparse_row_id_fn", "=", "None", ")", ":", "assert", "self", ".", "binded", "if", "sparse_row_id_fn", "is", "not", "None", ":", "if", "not", "self", ".", "_kvstore", "or", "not", "self", ".", "_updat...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
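A hedged sketch of the row_sparse broadcast pattern the docstring describes. The parameter name 'embed_weight' and the choice of batch indices as row ids are assumptions for illustration:

    def sparse_row_id_fn(data_batch):
        # pull only the embedding rows this batch actually touches
        return {'embed_weight': data_batch.data[0].astype('int64')}

    for batch in train_iter:                   # `train_iter` assumed to exist
        mod.prepare(batch, sparse_row_id_fn=sparse_row_id_fn)
        mod.forward_backward(batch)
        mod.update()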
train
_random_helper
Helper function for random generators.
python/mxnet/ndarray/random.py
def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs): """Helper function for random generators.""" if isinstance(params[0], NDArray): for i in params[1:]: assert isinstance(i, NDArray), \ "Distribution parameters must all have the same type, but got " \ "both %s and %s."%(type(params[0]), type(i)) return sampler(*params, shape=shape, dtype=dtype, out=out, **kwargs) elif isinstance(params[0], numeric_types): if ctx is None: ctx = current_context() if shape is _Null and out is None: shape = 1 for i in params[1:]: assert isinstance(i, numeric_types), \ "Distribution parameters must all have the same type, but got " \ "both %s and %s."%(type(params[0]), type(i)) return random(*params, shape=shape, dtype=dtype, ctx=ctx, out=out, **kwargs) raise ValueError("Distribution parameters must be either NDArray or numbers, " "but got %s."%type(params[0]))
def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs): """Helper function for random generators.""" if isinstance(params[0], NDArray): for i in params[1:]: assert isinstance(i, NDArray), \ "Distribution parameters must all have the same type, but got " \ "both %s and %s."%(type(params[0]), type(i)) return sampler(*params, shape=shape, dtype=dtype, out=out, **kwargs) elif isinstance(params[0], numeric_types): if ctx is None: ctx = current_context() if shape is _Null and out is None: shape = 1 for i in params[1:]: assert isinstance(i, numeric_types), \ "Distribution parameters must all have the same type, but got " \ "both %s and %s."%(type(params[0]), type(i)) return random(*params, shape=shape, dtype=dtype, ctx=ctx, out=out, **kwargs) raise ValueError("Distribution parameters must be either NDArray or numbers, " "but got %s."%type(params[0]))
[ "Helper", "function", "for", "random", "generators", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L31-L51
[ "def", "_random_helper", "(", "random", ",", "sampler", ",", "params", ",", "shape", ",", "dtype", ",", "ctx", ",", "out", ",", "kwargs", ")", ":", "if", "isinstance", "(", "params", "[", "0", "]", ",", "NDArray", ")", ":", "for", "i", "in", "param...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
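The helper's two branches are easiest to see through a public caller such as `uniform` (the next record): numeric parameters dispatch to the `random` op with an explicit `ctx`, while NDArray parameters dispatch to the `sampler` op. A short sketch:

    import mxnet as mx

    a = mx.nd.random.uniform(0, 1, shape=(2, 3))  # numeric params -> `random` branch
    low, high = mx.nd.array([0., 10.]), mx.nd.array([1., 20.])
    b = mx.nd.random.uniform(low, high, shape=3)  # NDArray params -> `sampler` branch,
                                                  # result shape (2, 3)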
train
uniform
Draw random samples from a uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : float or NDArray, optional Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. high : float or NDArray, optional Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed samples are drawn for each `[low, high)` pair. Examples -------- >>> mx.nd.random.uniform(0, 1) [ 0.54881352] <NDArray 1 @cpu(0)> >>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0)) [ 0.92514056] <NDArray 1 @gpu(0)> >>> mx.nd.random.uniform(-1, 1, shape=(2,)) [ 0.71589124 0.08976638] <NDArray 2 @cpu(0)> >>> low = mx.nd.array([1,2,3]) >>> high = mx.nd.array([2,3,4]) >>> mx.nd.random.uniform(low, high, shape=2) [[ 1.78653979 1.93707538] [ 2.01311183 2.37081361] [ 3.30491424 3.69977832]] <NDArray 3x2 @cpu(0)>
python/mxnet/ndarray/random.py
def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : float or NDArray, optional Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. high : float or NDArray, optional Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed samples are drawn for each `[low, high)` pair. Examples -------- >>> mx.nd.random.uniform(0, 1) [ 0.54881352] <NDArray 1 @cpu(0)> >>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0)) [ 0.92514056] <NDArray 1 @gpu(0)> >>> mx.nd.random.uniform(-1, 1, shape=(2,)) [ 0.71589124 0.08976638] <NDArray 2 @cpu(0)> >>> low = mx.nd.array([1,2,3]) >>> high = mx.nd.array([2,3,4]) >>> mx.nd.random.uniform(low, high, shape=2) [[ 1.78653979 1.93707538] [ 2.01311183 2.37081361] [ 3.30491424 3.69977832]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_uniform, _internal._sample_uniform, [low, high], shape, dtype, ctx, out, kwargs)
def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : float or NDArray, optional Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. high : float or NDArray, optional Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed samples are drawn for each `[low, high)` pair. Examples -------- >>> mx.nd.random.uniform(0, 1) [ 0.54881352] <NDArray 1 @cpu(0)> >>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0)) [ 0.92514056] <NDArray 1 @gpu(0)> >>> mx.nd.random.uniform(-1, 1, shape=(2,)) [ 0.71589124 0.08976638] <NDArray 2 @cpu(0)> >>> low = mx.nd.array([1,2,3]) >>> high = mx.nd.array([2,3,4]) >>> mx.nd.random.uniform(low, high, shape=2) [[ 1.78653979 1.93707538] [ 2.01311183 2.37081361] [ 3.30491424 3.69977832]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_uniform, _internal._sample_uniform, [low, high], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "uniform", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L54-L110
[ "def", "uniform", "(", "low", "=", "0", ",", "high", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_interna...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
normal
Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray, optional Mean (centre) of the distribution. scale : float or NDArray, optional Standard deviation (spread or width) of the distribution. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.normal(0, 1) [ 2.21220636] <NDArray 1 @cpu(0)> >>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0)) [ 0.29253659] <NDArray 1 @gpu(0)> >>> mx.nd.random.normal(-1, 1, shape=(2,)) [-0.2259962 -0.51619542] <NDArray 2 @cpu(0)> >>> loc = mx.nd.array([1,2,3]) >>> scale = mx.nd.array([2,3,4]) >>> mx.nd.random.normal(loc, scale, shape=2) [[ 0.55912292 3.19566321] [ 1.91728961 2.47706747] [ 2.79666662 5.44254589]] <NDArray 3x2 @cpu(0)>
python/mxnet/ndarray/random.py
def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray, optional Mean (centre) of the distribution. scale : float or NDArray, optional Standard deviation (spread or width) of the distribution. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.normal(0, 1) [ 2.21220636] <NDArray 1 @cpu(0)> >>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0)) [ 0.29253659] <NDArray 1 @gpu(0)> >>> mx.nd.random.normal(-1, 1, shape=(2,)) [-0.2259962 -0.51619542] <NDArray 2 @cpu(0)> >>> loc = mx.nd.array([1,2,3]) >>> scale = mx.nd.array([2,3,4]) >>> mx.nd.random.normal(loc, scale, shape=2) [[ 0.55912292 3.19566321] [ 1.91728961 2.47706747] [ 2.79666662 5.44254589]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_normal, _internal._sample_normal, [loc, scale], shape, dtype, ctx, out, kwargs)
def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray, optional Mean (centre) of the distribution. scale : float or NDArray, optional Standard deviation (spread or width) of the distribution. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.normal(0, 1) [ 2.21220636] <NDArray 1 @cpu(0)> >>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0)) [ 0.29253659] <NDArray 1 @gpu(0)> >>> mx.nd.random.normal(-1, 1, shape=(2,)) [-0.2259962 -0.51619542] <NDArray 2 @cpu(0)> >>> loc = mx.nd.array([1,2,3]) >>> scale = mx.nd.array([2,3,4]) >>> mx.nd.random.normal(loc, scale, shape=2) [[ 0.55912292 3.19566321] [ 1.91728961 2.47706747] [ 2.79666662 5.44254589]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_normal, _internal._sample_normal, [loc, scale], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "normal", "(", "Gaussian", ")", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L113-L167
[ "def", "normal", "(", "loc", "=", "0", ",", "scale", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_interna...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
randn
Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray Mean (centre) of the distribution. scale : float or NDArray Standard deviation (spread or width) of the distribution. shape : int or tuple of ints The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'} Data type of output samples. Default is 'float32' ctx : Context Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.randn() 2.21220636 <NDArray 1 @cpu(0)> >>> mx.nd.random.randn(2, 2) [[-1.856082 -1.9768796 ] [-0.20801921 0.2444218 ]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.randn(2, 3, loc=5, scale=1) [[4.19962 4.8311777 5.936328 ] [5.357444 5.7793283 3.9896927]] <NDArray 2x3 @cpu(0)>
python/mxnet/ndarray/random.py
def randn(*shape, **kwargs): """Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray Mean (centre) of the distribution. scale : float or NDArray Standard deviation (spread or width) of the distribution. shape : int or tuple of ints The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'} Data type of output samples. Default is 'float32' ctx : Context Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.randn() 2.21220636 <NDArray 1 @cpu(0)> >>> mx.nd.random.randn(2, 2) [[-1.856082 -1.9768796 ] [-0.20801921 0.2444218 ]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.randn(2, 3, loc=5, scale=1) [[4.19962 4.8311777 5.936328 ] [5.357444 5.7793283 3.9896927]] <NDArray 2x3 @cpu(0)> """ loc = kwargs.pop('loc', 0) scale = kwargs.pop('scale', 1) dtype = kwargs.pop('dtype', _Null) ctx = kwargs.pop('ctx', None) out = kwargs.pop('out', None) assert isinstance(loc, (int, float)) assert isinstance(scale, (int, float)) return _random_helper(_internal._random_normal, _internal._sample_normal, [loc, scale], shape, dtype, ctx, out, kwargs)
def randn(*shape, **kwargs): """Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray Mean (centre) of the distribution. scale : float or NDArray Standard deviation (spread or width) of the distribution. shape : int or tuple of ints The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. dtype : {'float16', 'float32', 'float64'} Data type of output samples. Default is 'float32' ctx : Context Device context of output. Default is current context. Overridden by `loc.context` when `loc` is an NDArray. out : NDArray Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair. Examples -------- >>> mx.nd.random.randn() 2.21220636 <NDArray 1 @cpu(0)> >>> mx.nd.random.randn(2, 2) [[-1.856082 -1.9768796 ] [-0.20801921 0.2444218 ]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.randn(2, 3, loc=5, scale=1) [[4.19962 4.8311777 5.936328 ] [5.357444 5.7793283 3.9896927]] <NDArray 2x3 @cpu(0)> """ loc = kwargs.pop('loc', 0) scale = kwargs.pop('scale', 1) dtype = kwargs.pop('dtype', _Null) ctx = kwargs.pop('ctx', None) out = kwargs.pop('out', None) assert isinstance(loc, (int, float)) assert isinstance(scale, (int, float)) return _random_helper(_internal._random_normal, _internal._sample_normal, [loc, scale], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "normal", "(", "Gaussian", ")", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L170-L226
[ "def", "randn", "(", "*", "shape", ",", "*", "*", "kwargs", ")", ":", "loc", "=", "kwargs", ".", "pop", "(", "'loc'", ",", "0", ")", "scale", "=", "kwargs", ".", "pop", "(", "'scale'", ",", "1", ")", "dtype", "=", "kwargs", ".", "pop", "(", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
exponential
r"""Draw samples from an exponential distribution. Its probability density function is .. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}), for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the inverse of the rate parameter \lambda = 1/\beta. Parameters ---------- scale : float or NDArray, optional The scale parameter, \beta = 1/\lambda. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `scale.context` when `scale` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output` will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale. Examples -------- >>> mx.nd.random.exponential(1) [ 0.79587454] <NDArray 1 @cpu(0)> >>> mx.nd.random.exponential(1, shape=(2,)) [ 0.89856035 1.25593066] <NDArray 2 @cpu(0)> >>> scale = mx.nd.array([1,2,3]) >>> mx.nd.random.exponential(scale, shape=2) [[ 0.41063145 0.42140478] [ 2.59407091 10.12439728] [ 2.42544937 1.14260709]] <NDArray 3x2 @cpu(0)>
python/mxnet/ndarray/random.py
def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): r"""Draw samples from an exponential distribution. Its probability density function is .. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}), for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the inverse of the rate parameter \lambda = 1/\beta. Parameters ---------- scale : float or NDArray, optional The scale parameter, \beta = 1/\lambda. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `scale.context` when `scale` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output` will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale. Examples -------- >>> mx.nd.random.exponential(1) [ 0.79587454] <NDArray 1 @cpu(0)> >>> mx.nd.random.exponential(1, shape=(2,)) [ 0.89856035 1.25593066] <NDArray 2 @cpu(0)> >>> scale = mx.nd.array([1,2,3]) >>> mx.nd.random.exponential(scale, shape=2) [[ 0.41063145 0.42140478] [ 2.59407091 10.12439728] [ 2.42544937 1.14260709]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_exponential, _internal._sample_exponential, [1.0/scale], shape, dtype, ctx, out, kwargs)
def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): r"""Draw samples from an exponential distribution. Its probability density function is .. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}), for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the inverse of the rate parameter \lambda = 1/\beta. Parameters ---------- scale : float or NDArray, optional The scale parameter, \beta = 1/\lambda. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `scale.context` when `scale` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output` will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale. Examples -------- >>> mx.nd.random.exponential(1) [ 0.79587454] <NDArray 1 @cpu(0)> >>> mx.nd.random.exponential(1, shape=(2,)) [ 0.89856035 1.25593066] <NDArray 2 @cpu(0)> >>> scale = mx.nd.array([1,2,3]) >>> mx.nd.random.exponential(scale, shape=2) [[ 0.41063145 0.42140478] [ 2.59407091 10.12439728] [ 2.42544937 1.14260709]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_exponential, _internal._sample_exponential, [1.0/scale], shape, dtype, ctx, out, kwargs)
[ "r", "Draw", "samples", "from", "an", "exponential", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L279-L329
[ "def", "exponential", "(", "scale", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_internal", ".", "_random_exp...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
gamma
Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta : float or NDArray, optional The scale of the gamma distribution. Should be greater than zero. Default is equal to 1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `alpha.context` when `alpha` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. Examples -------- >>> mx.nd.random.gamma(1, 1) [ 1.93308783] <NDArray 1 @cpu(0)> >>> mx.nd.random.gamma(1, 1, shape=(2,)) [ 0.48216391 2.09890771] <NDArray 2 @cpu(0)> >>> alpha = mx.nd.array([1,2,3]) >>> beta = mx.nd.array([2,3,4]) >>> mx.nd.random.gamma(alpha, beta, shape=2) [[ 3.24343276 0.94137681] [ 3.52734375 0.45568955] [ 14.26264095 14.0170126 ]] <NDArray 3x2 @cpu(0)>
python/mxnet/ndarray/random.py
def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta : float or NDArray, optional The scale of the gamma distribution. Should be greater than zero. Default is equal to 1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `alpha.context` when `alpha` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. Examples -------- >>> mx.nd.random.gamma(1, 1) [ 1.93308783] <NDArray 1 @cpu(0)> >>> mx.nd.random.gamma(1, 1, shape=(2,)) [ 0.48216391 2.09890771] <NDArray 2 @cpu(0)> >>> alpha = mx.nd.array([1,2,3]) >>> beta = mx.nd.array([2,3,4]) >>> mx.nd.random.gamma(alpha, beta, shape=2) [[ 3.24343276 0.94137681] [ 3.52734375 0.45568955] [ 14.26264095 14.0170126 ]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_gamma, _internal._sample_gamma, [alpha, beta], shape, dtype, ctx, out, kwargs)
def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta : float or NDArray, optional The scale of the gamma distribution. Should be greater than zero. Default is equal to 1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `alpha.context` when `alpha` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. Examples -------- >>> mx.nd.random.gamma(1, 1) [ 1.93308783] <NDArray 1 @cpu(0)> >>> mx.nd.random.gamma(1, 1, shape=(2,)) [ 0.48216391 2.09890771] <NDArray 2 @cpu(0)> >>> alpha = mx.nd.array([1,2,3]) >>> beta = mx.nd.array([2,3,4]) >>> mx.nd.random.gamma(alpha, beta, shape=2) [[ 3.24343276 0.94137681] [ 3.52734375 0.45568955] [ 14.26264095 14.0170126 ]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_gamma, _internal._sample_gamma, [alpha, beta], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "gamma", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L332-L383
[ "def", "gamma", "(", "alpha", "=", "1", ",", "beta", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_interna...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
negative_binomial
Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. Parameters ---------- k : float or NDArray, optional Limit of unsuccessful experiments, > 0. p : float or NDArray, optional Failure probability in each experiment, >= 0 and <=1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `k.context` when `k` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. Examples -------- >>> mx.nd.random.negative_binomial(10, 0.5) [ 4.] <NDArray 1 @cpu(0)> >>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,)) [ 3. 4.] <NDArray 2 @cpu(0)> >>> k = mx.nd.array([1,2,3]) >>> p = mx.nd.array([0.2,0.4,0.6]) >>> mx.nd.random.negative_binomial(k, p, shape=2) [[ 3. 2.] [ 4. 4.] [ 0. 5.]] <NDArray 3x2 @cpu(0)>
python/mxnet/ndarray/random.py
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. Parameters ---------- k : float or NDArray, optional Limit of unsuccessful experiments, > 0. p : float or NDArray, optional Failure probability in each experiment, >= 0 and <=1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `k.context` when `k` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. Examples -------- >>> mx.nd.random.negative_binomial(10, 0.5) [ 4.] <NDArray 1 @cpu(0)> >>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,)) [ 3. 4.] <NDArray 2 @cpu(0)> >>> k = mx.nd.array([1,2,3]) >>> p = mx.nd.array([0.2,0.4,0.6]) >>> mx.nd.random.negative_binomial(k, p, shape=2) [[ 3. 2.] [ 4. 4.] [ 0. 5.]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_negative_binomial, _internal._sample_negative_binomial, [k, p], shape, dtype, ctx, out, kwargs)
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. Parameters ---------- k : float or NDArray, optional Limit of unsuccessful experiments, > 0. p : float or NDArray, optional Failure probability in each experiment, >= 0 and <=1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `k.context` when `k` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair. Examples -------- >>> mx.nd.random.negative_binomial(10, 0.5) [ 4.] <NDArray 1 @cpu(0)> >>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,)) [ 3. 4.] <NDArray 2 @cpu(0)> >>> k = mx.nd.array([1,2,3]) >>> p = mx.nd.array([0.2,0.4,0.6]) >>> mx.nd.random.negative_binomial(k, p, shape=2) [[ 3. 2.] [ 4. 4.] [ 0. 5.]] <NDArray 3x2 @cpu(0)> """ return _random_helper(_internal._random_negative_binomial, _internal._sample_negative_binomial, [k, p], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "negative", "binomial", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L386-L439
[ "def", "negative_binomial", "(", "k", "=", "1", ",", "p", "=", "1", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_in...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
multinomial
Concurrent sampling from multiple multinomial distributions. .. note:: The input distribution must be normalized, i.e. `data` must sum to 1 along its last dimension. Parameters ---------- data : NDArray An *n* dimensional array whose last dimension has length `k`, where `k` is the number of possible outcomes of each multinomial distribution. For example, data with shape `(m, n, k)` specifies `m*n` multinomial distributions each with `k` possible outcomes. shape : int or tuple of ints, optional The number of samples to draw from each distribution. If shape is empty one sample will be drawn from each distribution. get_prob : bool, optional If true, a second array containing log likelihood of the drawn samples will also be returned. This is usually used for reinforcement learning, where you can provide reward as head gradient w.r.t. this array to estimate gradient. out : NDArray, optional Store output to an existing NDArray. dtype : str or numpy.dtype, optional Data type of the sample output array. The default is int32. Note that the data type of the log likelihood array is the same as that of `data`. Returns ------- List, or NDArray For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input `shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape `(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the returned NDArray consist of 0-indexed values sampled from each respective multinomial distribution provided in the `k` dimension of `data`. For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`. If `get_prob` is set to True, this function returns a list of format: `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the same shape as the sampled outputs. Examples -------- >>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4]) >>> mx.nd.random.multinomial(probs) [3] <NDArray 1 @cpu(0)> >>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]]) >>> mx.nd.random.multinomial(probs) [3 1] <NDArray 2 @cpu(0)> >>> mx.nd.random.multinomial(probs, shape=2) [[4 4] [1 2]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.multinomial(probs, get_prob=True) [3 2] <NDArray 2 @cpu(0)> [-1.20397282 -1.60943794] <NDArray 2 @cpu(0)>
python/mxnet/ndarray/random.py
def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kwargs): """Concurrent sampling from multiple multinomial distributions. .. note:: The input distribution must be normalized, i.e. `data` must sum to 1 along its last dimension. Parameters ---------- data : NDArray An *n* dimensional array whose last dimension has length `k`, where `k` is the number of possible outcomes of each multinomial distribution. For example, data with shape `(m, n, k)` specifies `m*n` multinomial distributions each with `k` possible outcomes. shape : int or tuple of ints, optional The number of samples to draw from each distribution. If shape is empty one sample will be drawn from each distribution. get_prob : bool, optional If true, a second array containing log likelihood of the drawn samples will also be returned. This is usually used for reinforcement learning, where you can provide reward as head gradient w.r.t. this array to estimate gradient. out : NDArray, optional Store output to an existing NDArray. dtype : str or numpy.dtype, optional Data type of the sample output array. The default is int32. Note that the data type of the log likelihood array is the same as that of `data`. Returns ------- List, or NDArray For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input `shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape `(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the returned NDArray consist of 0-indexed values sampled from each respective multinomial distribution provided in the `k` dimension of `data`. For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`. If `get_prob` is set to True, this function returns a list of format: `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the same shape as the sampled outputs. Examples -------- >>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4]) >>> mx.nd.random.multinomial(probs) [3] <NDArray 1 @cpu(0)> >>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]]) >>> mx.nd.random.multinomial(probs) [3 1] <NDArray 2 @cpu(0)> >>> mx.nd.random.multinomial(probs, shape=2) [[4 4] [1 2]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.multinomial(probs, get_prob=True) [3 2] <NDArray 2 @cpu(0)> [-1.20397282 -1.60943794] <NDArray 2 @cpu(0)> """ return _internal._sample_multinomial(data, shape, get_prob, out=out, dtype=dtype, **kwargs)
def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kwargs): """Concurrent sampling from multiple multinomial distributions. .. note:: The input distribution must be normalized, i.e. `data` must sum to 1 along its last dimension. Parameters ---------- data : NDArray An *n* dimensional array whose last dimension has length `k`, where `k` is the number of possible outcomes of each multinomial distribution. For example, data with shape `(m, n, k)` specifies `m*n` multinomial distributions each with `k` possible outcomes. shape : int or tuple of ints, optional The number of samples to draw from each distribution. If shape is empty one sample will be drawn from each distribution. get_prob : bool, optional If true, a second array containing log likelihood of the drawn samples will also be returned. This is usually used for reinforcement learning, where you can provide reward as head gradient w.r.t. this array to estimate gradient. out : NDArray, optional Store output to an existing NDArray. dtype : str or numpy.dtype, optional Data type of the sample output array. The default is int32. Note that the data type of the log likelihood array is the same as that of `data`. Returns ------- List, or NDArray For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input `shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape `(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the returned NDArray consist of 0-indexed values sampled from each respective multinomial distribution provided in the `k` dimension of `data`. For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`. If `get_prob` is set to True, this function returns a list of format: `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the same shape as the sampled outputs. Examples -------- >>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4]) >>> mx.nd.random.multinomial(probs) [3] <NDArray 1 @cpu(0)> >>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]]) >>> mx.nd.random.multinomial(probs) [3 1] <NDArray 2 @cpu(0)> >>> mx.nd.random.multinomial(probs, shape=2) [[4 4] [1 2]] <NDArray 2x2 @cpu(0)> >>> mx.nd.random.multinomial(probs, get_prob=True) [3 2] <NDArray 2 @cpu(0)> [-1.20397282 -1.60943794] <NDArray 2 @cpu(0)> """ return _internal._sample_multinomial(data, shape, get_prob, out=out, dtype=dtype, **kwargs)
[ "Concurrent", "sampling", "from", "multiple", "multinomial", "distributions", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L500-L562
[ "def", "multinomial", "(", "data", ",", "shape", "=", "_Null", ",", "get_prob", "=", "False", ",", "out", "=", "None", ",", "dtype", "=", "'int32'", ",", "*", "*", "kwargs", ")", ":", "return", "_internal", ".", "_sample_multinomial", "(", "data", ",",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
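The docstring's reinforcement-learning remark can be made concrete. A hedged sketch (the reward values are illustrative, and it assumes, as the docstring states, that the reward can be supplied as the head gradient of the log-likelihood output):

    import mxnet as mx

    probs = mx.nd.array([[0.1, 0.9], [0.8, 0.2]])
    probs.attach_grad()
    with mx.autograd.record():
        action, log_p = mx.nd.random.multinomial(probs, get_prob=True)
    reward = mx.nd.array([1.0, -1.0])  # per-sample reward, illustrative
    log_p.backward(reward)             # reward as head gradient, per the docstring
    print(probs.grad)                  # REINFORCE-style gradient estimate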
train
randint
Draw random samples from a discrete uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : int, required Lower boundary of the output interval. All values generated will be greater than or equal to low. high : int, required Upper boundary of the output interval. All values generated will be less than high. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. dtype : {'int32', 'int64'}, optional Data type of output samples. Default is 'int32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)`, the returned NDArray's shape will be `(m, n)`. Contents of the returned NDArray will be samples from the interval `[low, high)`. Examples -------- >>> mx.nd.random.randint(5, 100) [ 90] <NDArray 1 @cpu(0)> >>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0)) [ -8] <NDArray 1 @gpu(0)> >>> mx.nd.random.randint(-10, 10, shape=(2,)) [ -5 4] <NDArray 2 @cpu(0)>
python/mxnet/ndarray/random.py
def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs): """Draw random samples from a discrete uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : int, required Lower boundary of the output interval. All values generated will be greater than or equal to low. high : int, required Upper boundary of the output interval. All values generated will be less than high. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`. dtype : {'int32', 'int64'}, optional Data type of output samples. Default is 'int32' ctx : Context, optional Device context of output. Default is current context. Overridden by `low.context` when `low` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)`, the returned NDArray's shape will be `(m, n)`. Contents of the returned NDArray will be samples from the interval `[low, high)`. Examples -------- >>> mx.nd.random.randint(5, 100) [ 90] <NDArray 1 @cpu(0)> >>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0)) [ -8] <NDArray 1 @gpu(0)> >>> mx.nd.random.randint(-10, 10, shape=(2,)) [ -5 4] <NDArray 2 @cpu(0)> """ return _random_helper(_internal._random_randint, None, [low, high], shape, dtype, ctx, out, kwargs)
def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
    """Draw random samples from a discrete uniform distribution.

    Samples are uniformly distributed over the half-open interval *[low, high)*
    (includes *low*, but excludes *high*).

    Parameters
    ----------
    low : int, required
        Lower boundary of the output interval. All values generated will be
        greater than or equal to low.
    high : int, required
        Upper boundary of the output interval. All values generated will be
        less than high.
    shape : int or tuple of ints, optional
        The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
        `high` are scalars, output shape will be `(m, n)`.
    dtype : {'int32', 'int64'}, optional
        Data type of output samples. Default is 'int32'.
    ctx : Context, optional
        Device context of output. Default is current context. Overridden by
        `low.context` when `low` is an NDArray.
    out : NDArray, optional
        Store output to an existing NDArray.

    Returns
    -------
    NDArray
        An NDArray of type `dtype`. If input `shape` has shape, e.g., `(m, n)`,
        the returned NDArray will have shape `(m, n)`. Contents of the returned
        NDArray will be samples from the interval `[low, high)`.

    Examples
    --------
    >>> mx.nd.random.randint(5, 100)
    [ 90]
    <NDArray 1 @cpu(0)>
    >>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
    [ -8]
    <NDArray 1 @gpu(0)>
    >>> mx.nd.random.randint(-10, 10, shape=(2,))
    [ -5  4]
    <NDArray 2 @cpu(0)>
    """
    return _random_helper(_internal._random_randint, None,
                          [low, high], shape, dtype, ctx, out, kwargs)
[ "Draw", "random", "samples", "from", "a", "discrete", "uniform", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/random.py#L604-L649
[ "def", "randint", "(", "low", ",", "high", ",", "shape", "=", "_Null", ",", "dtype", "=", "_Null", ",", "ctx", "=", "None", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_random_helper", "(", "_internal", ".", "_random_randin...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
preprocess_uci_adult
Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial.
example/sparse/wide_deep/data.py
def preprocess_uci_adult(data_name): """Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. """ csv_columns = [ "age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket" ] vocabulary_dict = { "gender": [ "Female", "Male" ], "education": [ "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", "Preschool", "12th" ], "marital_status": [ "Married-civ-spouse", "Divorced", "Married-spouse-absent", "Never-married", "Separated", "Married-AF-spouse", "Widowed" ], "relationship": [ "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", "Other-relative" ], "workclass": [ "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" ] } # wide columns crossed_columns = [ ["education", "occupation"], ["native_country", "occupation"], ["age_buckets", "education", "occupation"], ] age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65] # deep columns indicator_columns = ['workclass', 'education', 'gender', 'relationship'] embedding_columns = ['native_country', 'occupation'] continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week'] # income_bracket column is the label labels = ["<", ">"] hash_bucket_size = 1000 csr_ncols = len(crossed_columns) * hash_bucket_size dns_ncols = len(continuous_columns) + len(embedding_columns) for col in indicator_columns: dns_ncols += len(vocabulary_dict[col]) label_list = [] csr_list = [] dns_list = [] with open(data_name) as f: for row in DictReader(f, fieldnames=csv_columns): label_list.append(labels.index(row['income_bracket'].strip()[0])) for i, cols in enumerate(crossed_columns): if cols[0] == "age_buckets": age_bucket = np.digitize(float(row["age"]), age_boundaries) s = '_'.join([row[col].strip() for col in cols[1:]]) s += '_' + str(age_bucket) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) else: s = '_'.join([row[col].strip() for col in cols]) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) dns_row = [0] * dns_ncols dns_dim = 0 for col in embedding_columns: dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size dns_dim += 1 for col in indicator_columns: dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0 dns_dim += len(vocabulary_dict[col]) for col in continuous_columns: dns_row[dns_dim] = float(row[col].strip()) dns_dim += 1 dns_list.append(dns_row) data_list = [item[1] for item in csr_list] indices_list = [item[0] for item in csr_list] indptr_list = range(0, len(indices_list) + 1, len(crossed_columns)) # convert to ndarrays csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=(len(label_list), hash_bucket_size * len(crossed_columns))) dns = np.array(dns_list) label = np.array(label_list) return csr, dns, label
def preprocess_uci_adult(data_name): """Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. """ csv_columns = [ "age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket" ] vocabulary_dict = { "gender": [ "Female", "Male" ], "education": [ "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", "Preschool", "12th" ], "marital_status": [ "Married-civ-spouse", "Divorced", "Married-spouse-absent", "Never-married", "Separated", "Married-AF-spouse", "Widowed" ], "relationship": [ "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", "Other-relative" ], "workclass": [ "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" ] } # wide columns crossed_columns = [ ["education", "occupation"], ["native_country", "occupation"], ["age_buckets", "education", "occupation"], ] age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65] # deep columns indicator_columns = ['workclass', 'education', 'gender', 'relationship'] embedding_columns = ['native_country', 'occupation'] continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week'] # income_bracket column is the label labels = ["<", ">"] hash_bucket_size = 1000 csr_ncols = len(crossed_columns) * hash_bucket_size dns_ncols = len(continuous_columns) + len(embedding_columns) for col in indicator_columns: dns_ncols += len(vocabulary_dict[col]) label_list = [] csr_list = [] dns_list = [] with open(data_name) as f: for row in DictReader(f, fieldnames=csv_columns): label_list.append(labels.index(row['income_bracket'].strip()[0])) for i, cols in enumerate(crossed_columns): if cols[0] == "age_buckets": age_bucket = np.digitize(float(row["age"]), age_boundaries) s = '_'.join([row[col].strip() for col in cols[1:]]) s += '_' + str(age_bucket) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) else: s = '_'.join([row[col].strip() for col in cols]) csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0)) dns_row = [0] * dns_ncols dns_dim = 0 for col in embedding_columns: dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size dns_dim += 1 for col in indicator_columns: dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0 dns_dim += len(vocabulary_dict[col]) for col in continuous_columns: dns_row[dns_dim] = float(row[col].strip()) dns_dim += 1 dns_list.append(dns_row) data_list = [item[1] for item in csr_list] indices_list = [item[0] for item in csr_list] indptr_list = range(0, len(indices_list) + 1, len(crossed_columns)) # convert to ndarrays csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list), shape=(len(label_list), hash_bucket_size * len(crossed_columns))) dns = np.array(dns_list) label = np.array(label_list) return csr, dns, label
[ "Some", "tricks", "of", "feature", "engineering", "are", "adapted", "from", "tensorflow", "s", "wide", "and", "deep", "tutorial", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/wide_deep/data.py#L40-L139
[ "def", "preprocess_uci_adult", "(", "data_name", ")", ":", "csv_columns", "=", "[", "\"age\"", ",", "\"workclass\"", ",", "\"fnlwgt\"", ",", "\"education\"", ",", "\"education_num\"", ",", "\"marital_status\"", ",", "\"occupation\"", ",", "\"relationship\"", ",", "\...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
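A hypothetical invocation of the record above, assuming the UCI Adult CSV has been downloaded locally ('adult.data' is a placeholder path); it only illustrates the shapes of the returned wide (CSR) and deep (dense) features.

# 'adult.data' is a placeholder path to the UCI Adult CSV.
csr, dns, label = preprocess_uci_adult('adult.data')
print(csr.shape)    # (num_rows, 3000): 3 crossed columns x 1000 hash buckets
print(dns.shape)    # (num_rows, 40): 2 embedding + 33 indicator + 5 continuous
print(label[:5])    # 0 for '<=50K', 1 for '>50K'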
Trainer._init_params
Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored.
python/mxnet/gluon/trainer.py
def _init_params(self): """Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored. """ assert self._kv_initialized, "Cannot initialize parameters in KVStore " \ "when KVStore is not initialized." params_to_init = [] if self._kvstore: for param in self._params_to_init: if param._deferred_init: params_to_init.append(param) else: param_arrays = param._check_and_get(param._data, list) idx = self._param2idx[param.name] self._kvstore.init(idx, param_arrays[0]) if param._stype == 'default': self._kvstore.pull(idx, param_arrays, priority=-idx) self._params_to_init = params_to_init
def _init_params(self): """Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored. """ assert self._kv_initialized, "Cannot initialize parameters in KVStore " \ "when KVStore is not initialized." params_to_init = [] if self._kvstore: for param in self._params_to_init: if param._deferred_init: params_to_init.append(param) else: param_arrays = param._check_and_get(param._data, list) idx = self._param2idx[param.name] self._kvstore.init(idx, param_arrays[0]) if param._stype == 'default': self._kvstore.pull(idx, param_arrays, priority=-idx) self._params_to_init = params_to_init
[ "Initialize", "parameters", "in", "the", "KVStore", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L137-L157
[ "def", "_init_params", "(", "self", ")", ":", "assert", "self", ".", "_kv_initialized", ",", "\"Cannot initialize parameters in KVStore \"", "\"when KVStore is not initialized.\"", "params_to_init", "=", "[", "]", "if", "self", ".", "_kvstore", ":", "for", "param", "i...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer._reset_kvstore
Reset kvstore.
python/mxnet/gluon/trainer.py
def _reset_kvstore(self): """Reset kvstore.""" if self._kvstore and 'dist' in self._kvstore.type: raise RuntimeError("Cannot reset distributed KVStore.") self._kv_initialized = False self._kvstore = None self._distributed = None self._update_on_kvstore = None self._params_to_init = [param for param in self._params]
def _reset_kvstore(self): """Reset kvstore.""" if self._kvstore and 'dist' in self._kvstore.type: raise RuntimeError("Cannot reset distributed KVStore.") self._kv_initialized = False self._kvstore = None self._distributed = None self._update_on_kvstore = None self._params_to_init = [param for param in self._params]
[ "Reset", "kvstore", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L159-L167
[ "def", "_reset_kvstore", "(", "self", ")", ":", "if", "self", ".", "_kvstore", "and", "'dist'", "in", "self", ".", "_kvstore", ".", "type", ":", "raise", "RuntimeError", "(", "\"Cannot reset distributed KVStore.\"", ")", "self", ".", "_kv_initialized", "=", "F...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer._init_kvstore
Create kvstore.
python/mxnet/gluon/trainer.py
def _init_kvstore(self): """Create kvstore.""" config = self._kvstore_params # configure kvstore, update_on_kvstore and self._distributed on three cases: if self._contains_sparse_weight: # If weight is sparse, kvstore must be present and the weight must be updated on kvstore. # The training loop is the following: # - row_sparse_pull(sparse_weight) # - forward() # - backward() # - push_and_update(grad) # - pull(weight) kvstore, update_on_kvstore = _create_sparse_kvstore(config['kvstore']) self._distributed = 'dist' in kvstore.type # raise err if user provides unsupported configs if config['update_on_kvstore'] is False: raise ValueError("Cannot set update_on_kvstore=False when sparse weights " "are present.") elif self._contains_sparse_grad: # For single node training with dense weight and sparse grad, # we prefer update_on_kvstore=False because this is usually faster. # This means we push and pull sparse gradients, and we do not store weight in kvstore. # The training loop is the following: # - forward() # - backward() # - push(grad) # - pull(grad) # - update(grad, weight) # # For multi-node training with dense weight and sparse grad, # only update_on_kvstore=True is supported, due to the fact that # kv.row_sparse_pull(grad) is not implemented. # Therefore, we push sparse gradients and pull dense weights. # The training loop contains: # - forward() # - backward() # - push_and_update(grad) # - pull(weight) arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params} kvstore, _ = _create_kvstore(config['kvstore'], len(self._contexts), arg_arrays) self._distributed = 'dist' in kvstore.type if kvstore else False update_on_kvstore = self._distributed # raise err if user provides unsupported configs if config['update_on_kvstore'] is not None: if config['update_on_kvstore'] is False and self._distributed: raise ValueError("Cannot set update_on_kvstore=False on dist kvstore " "when sparse gradients are present.") update_on_kvstore = config['update_on_kvstore'] else: # Training with dense weight and dense gradients. # The only unsupported mode is async with update_on_kvstore=False arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params} kvstore, update_on_kvstore = _create_kvstore(config['kvstore'], len(self._contexts), arg_arrays) self._distributed = 'dist' in kvstore.type if kvstore else False if self._distributed and 'async' in kvstore.type: update_on_kvstore = True # raise err if user provides unsupported configs if config['update_on_kvstore'] is False: raise ValueError("Please set update_on_kvstore=True " "when training in async mode.") if config['update_on_kvstore'] is not None: update_on_kvstore = config['update_on_kvstore'] # set grad compression and optimizers if kvstore: if self._compression_params: kvstore.set_gradient_compression(self._compression_params) if update_on_kvstore: # optimizer preferably needs to be set before init for multiprecision kvstore.set_optimizer(self._optimizer) self._kvstore = kvstore self._update_on_kvstore = update_on_kvstore else: self._kvstore = None self._update_on_kvstore = None self._kv_initialized = True
def _init_kvstore(self): """Create kvstore.""" config = self._kvstore_params # configure kvstore, update_on_kvstore and self._distributed on three cases: if self._contains_sparse_weight: # If weight is sparse, kvstore must be present and the weight must be updated on kvstore. # The training loop is the following: # - row_sparse_pull(sparse_weight) # - forward() # - backward() # - push_and_update(grad) # - pull(weight) kvstore, update_on_kvstore = _create_sparse_kvstore(config['kvstore']) self._distributed = 'dist' in kvstore.type # raise err if user provides unsupported configs if config['update_on_kvstore'] is False: raise ValueError("Cannot set update_on_kvstore=False when sparse weights " "are present.") elif self._contains_sparse_grad: # For single node training with dense weight and sparse grad, # we prefer update_on_kvstore=False because this is usually faster. # This means we push and pull sparse gradients, and we do not store weight in kvstore. # The training loop is the following: # - forward() # - backward() # - push(grad) # - pull(grad) # - update(grad, weight) # # For multi-node training with dense weight and sparse grad, # only update_on_kvstore=True is supported, due to the fact that # kv.row_sparse_pull(grad) is not implemented. # Therefore, we push sparse gradients and pull dense weights. # The training loop contains: # - forward() # - backward() # - push_and_update(grad) # - pull(weight) arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params} kvstore, _ = _create_kvstore(config['kvstore'], len(self._contexts), arg_arrays) self._distributed = 'dist' in kvstore.type if kvstore else False update_on_kvstore = self._distributed # raise err if user provides unsupported configs if config['update_on_kvstore'] is not None: if config['update_on_kvstore'] is False and self._distributed: raise ValueError("Cannot set update_on_kvstore=False on dist kvstore " "when sparse gradients are present.") update_on_kvstore = config['update_on_kvstore'] else: # Training with dense weight and dense gradients. # The only unsupported mode is async with update_on_kvstore=False arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params} kvstore, update_on_kvstore = _create_kvstore(config['kvstore'], len(self._contexts), arg_arrays) self._distributed = 'dist' in kvstore.type if kvstore else False if self._distributed and 'async' in kvstore.type: update_on_kvstore = True # raise err if user provides unsupported configs if config['update_on_kvstore'] is False: raise ValueError("Please set update_on_kvstore=True " "when training in async mode.") if config['update_on_kvstore'] is not None: update_on_kvstore = config['update_on_kvstore'] # set grad compression and optimizers if kvstore: if self._compression_params: kvstore.set_gradient_compression(self._compression_params) if update_on_kvstore: # optimizer preferably needs to be set before init for multiprecision kvstore.set_optimizer(self._optimizer) self._kvstore = kvstore self._update_on_kvstore = update_on_kvstore else: self._kvstore = None self._update_on_kvstore = None self._kv_initialized = True
[ "Create", "kvstore", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L169-L248
[ "def", "_init_kvstore", "(", "self", ")", ":", "config", "=", "self", ".", "_kvstore_params", "# configure kvstore, update_on_kvstore and self._distributed on three cases:", "if", "self", ".", "_contains_sparse_weight", ":", "# If weight is sparse, kvstore must be present and the w...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
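A sketch of the user-facing `gluon.Trainer` settings that exercise the branches above; the network shapes and hyperparameters are illustrative, and the sparse-weight branch (row_sparse parameters, which force update_on_kvstore=True) is omitted.

from mxnet import gluon

# Dense weights and dense gradients: update_on_kvstore may be True or False
# (async distributed training requires True, as the code above enforces).
net = gluon.nn.Dense(10, in_units=20)
net.initialize()
dense_trainer = gluon.Trainer(net.collect_params(), 'sgd',
                              {'learning_rate': 0.1},
                              kvstore='device', update_on_kvstore=False)

# Dense weight with sparse gradient: on a single machine the code above
# prefers update_on_kvstore=False, which is usually faster.
emb = gluon.nn.Embedding(1000, 16, sparse_grad=True)
emb.initialize()
sparse_grad_trainer = gluon.Trainer(emb.collect_params(), 'adagrad',
                                    {'learning_rate': 0.01})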
Trainer.set_learning_rate
Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer.
python/mxnet/gluon/trainer.py
def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. """ if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. """ if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
[ "Sets", "a", "new", "learning", "rate", "of", "the", "optimizer", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L258-L270
[ "def", "set_learning_rate", "(", "self", ",", "lr", ")", ":", "if", "not", "isinstance", "(", "self", ".", "_optimizer", ",", "opt", ".", "Optimizer", ")", ":", "raise", "UserWarning", "(", "\"Optimizer has to be defined before its learning \"", "\"rate is mutated.\...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
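A common pattern, sketched here, is to decay the learning rate between epochs via `set_learning_rate`; the base rate and decay factor are arbitrary.

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(1, in_units=8)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

# Exponential decay between epochs.
base_lr, decay = 0.1, 0.9
for epoch in range(5):
    trainer.set_learning_rate(base_lr * decay ** epoch)
    # ... forward/backward and trainer.step(batch_size) would go here ...
print(trainer.learning_rate)  # 0.1 * 0.9 ** 4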
Trainer._row_sparse_pull
Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, `kv.pull` is preferred instead of `kv.row_sparse_pull`.
python/mxnet/gluon/trainer.py
def _row_sparse_pull(self, parameter, out, row_id, full_idx=False): """Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, `kv.pull` is preferred instead of `kv.row_sparse_pull`. """ # initialize kv and params if not already if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() idx = self._param2idx[parameter.name] if full_idx and 'dist' not in self._kvstore.type: assert row_id.size == out.shape[0] self._kvstore.pull(idx, out=out, priority=-idx, ignore_sparse=False) else: self._kvstore.row_sparse_pull(idx, out=out, row_ids=row_id, priority=-idx)
def _row_sparse_pull(self, parameter, out, row_id, full_idx=False): """Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, `kv.pull` is preferred instead of `kv.row_sparse_pull`. """ # initialize kv and params if not already if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() idx = self._param2idx[parameter.name] if full_idx and 'dist' not in self._kvstore.type: assert row_id.size == out.shape[0] self._kvstore.pull(idx, out=out, priority=-idx, ignore_sparse=False) else: self._kvstore.row_sparse_pull(idx, out=out, row_ids=row_id, priority=-idx)
[ "Internal", "method", "to", "invoke", "pull", "operations", "on", "KVStore", ".", "If", "full_idx", "is", "set", "to", "True", "kv", ".", "pull", "is", "preferred", "instead", "of", "kv", ".", "row_sparse_pull", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L272-L286
[ "def", "_row_sparse_pull", "(", "self", ",", "parameter", ",", "out", ",", "row_id", ",", "full_idx", "=", "False", ")", ":", "# initialize kv and params if not already", "if", "not", "self", ".", "_kv_initialized", ":", "self", ".", "_init_kvstore", "(", ")", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer.step
Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skip update.
python/mxnet/gluon/trainer.py
def step(self, batch_size, ignore_stale_grad=False): """Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skip update. """ rescale_grad = self._scale / batch_size self._check_and_rescale_grad(rescale_grad) if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() self._allreduce_grads() self._update(ignore_stale_grad)
def step(self, batch_size, ignore_stale_grad=False): """Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skip update. """ rescale_grad = self._scale / batch_size self._check_and_rescale_grad(rescale_grad) if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() self._allreduce_grads() self._update(ignore_stale_grad)
[ "Makes", "one", "step", "of", "parameter", "update", ".", "Should", "be", "called", "after", "autograd", ".", "backward", "()", "and", "outside", "of", "record", "()", "scope", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L298-L325
[ "def", "step", "(", "self", ",", "batch_size", ",", "ignore_stale_grad", "=", "False", ")", ":", "rescale_grad", "=", "self", ".", "_scale", "/", "batch_size", "self", ".", "_check_and_rescale_grad", "(", "rescale_grad", ")", "if", "not", "self", ".", "_kv_i...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
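The gradient-clipping scenario from the docstring, sketched with the manual `allreduce_grads()` / `update()` split; the network, data, and clipping threshold are illustrative.

import mxnet as mx
from mxnet import autograd, gluon

net = gluon.nn.Dense(1)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': 0.1}, update_on_kvstore=False)

x = mx.nd.random.uniform(shape=(4, 8))
y = mx.nd.random.uniform(shape=(4, 1))
loss_fn = gluon.loss.L2Loss()
with autograd.record():
    loss = loss_fn(net(x), y)
loss.backward()

trainer.allreduce_grads()                          # reduce grads across contexts
grads = [p.grad() for p in net.collect_params().values()]
gluon.utils.clip_global_norm(grads, max_norm=1.0)  # transform the reduced grads
trainer.update(batch_size=4)                       # then apply the optimizer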
Trainer.allreduce_grads
For each parameter, reduce the gradients from different contexts. Should be called after `autograd.backward()`, outside of `record()` scope, and before `trainer.update()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately.
python/mxnet/gluon/trainer.py
def allreduce_grads(self): """For each parameter, reduce the gradients from different contexts. Should be called after `autograd.backward()`, outside of `record()` scope, and before `trainer.update()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. """ if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() assert not (self._kvstore and self._update_on_kvstore), \ 'allreduce_grads() when parameters are updated on kvstore ' \ 'is not supported. Try setting `update_on_kvstore` ' \ 'to False when creating trainer.' self._allreduce_grads()
def allreduce_grads(self): """For each parameter, reduce the gradients from different contexts. Should be called after `autograd.backward()`, outside of `record()` scope, and before `trainer.update()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. """ if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() assert not (self._kvstore and self._update_on_kvstore), \ 'allreduce_grads() when parameters are updated on kvstore ' \ 'is not supported. Try setting `update_on_kvstore` ' \ 'to False when creating trainer.' self._allreduce_grads()
[ "For", "each", "parameter", "reduce", "the", "gradients", "from", "different", "contexts", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L327-L347
[ "def", "allreduce_grads", "(", "self", ")", ":", "if", "not", "self", ".", "_kv_initialized", ":", "self", ".", "_init_kvstore", "(", ")", "if", "self", ".", "_params_to_init", ":", "self", ".", "_init_params", "(", ")", "assert", "not", "(", "self", "."...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer.update
Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope, and after `trainer.allreduce_grads()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to perform certain transformation, such as in gradient clipping, then you may want to manually call `allreduce_grads()` and `update()` separately. Parameters ---------- batch_size : int Batch size of data processed. Gradient will be normalized by `1/batch_size`. Set this to 1 if you normalized loss manually with `loss = mean(loss)`. ignore_stale_grad : bool, optional, default=False If true, ignores Parameters with stale gradient (gradient that has not been updated by `backward` after last step) and skip update.
python/mxnet/gluon/trainer.py
def update(self, batch_size, ignore_stale_grad=False):
    """Makes one step of parameter update.

    Should be called after `autograd.backward()` and outside of `record()` scope,
    and after `trainer.allreduce_grads()`.

    For normal parameter updates, `step()` should be used, which internally calls
    `allreduce_grads()` and then `update()`. However, if you need to get the reduced
    gradients to perform certain transformation, such as in gradient clipping, then
    you may want to manually call `allreduce_grads()` and `update()` separately.

    Parameters
    ----------
    batch_size : int
        Batch size of data processed. Gradient will be normalized by `1/batch_size`.
        Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
    ignore_stale_grad : bool, optional, default=False
        If true, ignores Parameters with stale gradient (gradient that has not
        been updated by `backward` after last step) and skip update.
    """
    if not self._kv_initialized:
        self._init_kvstore()
    if self._params_to_init:
        self._init_params()
    assert not (self._kvstore and self._update_on_kvstore), \
        'update() when parameters are updated on kvstore ' \
        'is not supported. Try setting `update_on_kvstore` ' \
        'to False when creating trainer.'

    self._check_and_rescale_grad(self._scale / batch_size)
    self._update(ignore_stale_grad)
def update(self, batch_size, ignore_stale_grad=False):
    """Makes one step of parameter update.

    Should be called after `autograd.backward()` and outside of `record()` scope,
    and after `trainer.allreduce_grads()`.

    For normal parameter updates, `step()` should be used, which internally calls
    `allreduce_grads()` and then `update()`. However, if you need to get the reduced
    gradients to perform certain transformation, such as in gradient clipping, then
    you may want to manually call `allreduce_grads()` and `update()` separately.

    Parameters
    ----------
    batch_size : int
        Batch size of data processed. Gradient will be normalized by `1/batch_size`.
        Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
    ignore_stale_grad : bool, optional, default=False
        If true, ignores Parameters with stale gradient (gradient that has not
        been updated by `backward` after last step) and skip update.
    """
    if not self._kv_initialized:
        self._init_kvstore()
    if self._params_to_init:
        self._init_params()
    assert not (self._kvstore and self._update_on_kvstore), \
        'update() when parameters are updated on kvstore ' \
        'is not supported. Try setting `update_on_kvstore` ' \
        'to False when creating trainer.'

    self._check_and_rescale_grad(self._scale / batch_size)
    self._update(ignore_stale_grad)
[ "Makes", "one", "step", "of", "parameter", "update", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L359-L390
[ "def", "update", "(", "self", ",", "batch_size", ",", "ignore_stale_grad", "=", "False", ")", ":", "if", "not", "self", ".", "_kv_initialized", ":", "self", ".", "_init_kvstore", "(", ")", "if", "self", ".", "_params_to_init", ":", "self", ".", "_init_para...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer.save_states
Saves trainer states (e.g. optimizer, momentum) to a file. Parameters ---------- fname : str Path to output states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be saved.
python/mxnet/gluon/trainer.py
def save_states(self, fname): """Saves trainer states (e.g. optimizer, momentum) to a file. Parameters ---------- fname : str Path to output states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be saved. """ assert self._optimizer is not None if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() if self._update_on_kvstore: assert not self._params_to_init, "Cannot save trainer states when some " \ "parameters are not yet initialized in kvstore." self._kvstore.save_optimizer_states(fname, dump_optimizer=True) else: with open(fname, 'wb') as fout: fout.write(self._updaters[0].get_states(dump_optimizer=True))
def save_states(self, fname): """Saves trainer states (e.g. optimizer, momentum) to a file. Parameters ---------- fname : str Path to output states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be saved. """ assert self._optimizer is not None if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() if self._update_on_kvstore: assert not self._params_to_init, "Cannot save trainer states when some " \ "parameters are not yet initialized in kvstore." self._kvstore.save_optimizer_states(fname, dump_optimizer=True) else: with open(fname, 'wb') as fout: fout.write(self._updaters[0].get_states(dump_optimizer=True))
[ "Saves", "trainer", "states", "(", "e", ".", "g", ".", "optimizer", "momentum", ")", "to", "a", "file", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L429-L456
[ "def", "save_states", "(", "self", ",", "fname", ")", ":", "assert", "self", ".", "_optimizer", "is", "not", "None", "if", "not", "self", ".", "_kv_initialized", ":", "self", ".", "_init_kvstore", "(", ")", "if", "self", ".", "_params_to_init", ":", "sel...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
Trainer.load_states
Loads trainer states (e.g. optimizer, momentum) from a file. Parameters ---------- fname : str Path to input states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be loaded from the file, but rather set based on current Trainer's parameters.
python/mxnet/gluon/trainer.py
def load_states(self, fname): """Loads trainer states (e.g. optimizer, momentum) from a file. Parameters ---------- fname : str Path to input states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be loaded from the file, but rather set based on current Trainer's parameters. """ if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() if self._update_on_kvstore: self._kvstore.load_optimizer_states(fname) self._optimizer = self._kvstore._updater.optimizer else: with open(fname, 'rb') as f: states = f.read() for updater in self._updaters: updater.set_states(states) updater.optimizer = self._updaters[0].optimizer self._optimizer = self._updaters[0].optimizer param_dict = {i: param for i, param in enumerate(self._params)} self._optimizer.param_dict = param_dict
def load_states(self, fname): """Loads trainer states (e.g. optimizer, momentum) from a file. Parameters ---------- fname : str Path to input states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be loaded from the file, but rather set based on current Trainer's parameters. """ if not self._kv_initialized: self._init_kvstore() if self._params_to_init: self._init_params() if self._update_on_kvstore: self._kvstore.load_optimizer_states(fname) self._optimizer = self._kvstore._updater.optimizer else: with open(fname, 'rb') as f: states = f.read() for updater in self._updaters: updater.set_states(states) updater.optimizer = self._updaters[0].optimizer self._optimizer = self._updaters[0].optimizer param_dict = {i: param for i, param in enumerate(self._params)} self._optimizer.param_dict = param_dict
[ "Loads", "trainer", "states", "(", "e", ".", "g", ".", "optimizer", "momentum", ")", "from", "a", "file", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L458-L488
[ "def", "load_states", "(", "self", ",", "fname", ")", ":", "if", "not", "self", ".", "_kv_initialized", ":", "self", ".", "_init_kvstore", "(", ")", "if", "self", ".", "_params_to_init", ":", "self", ".", "_init_params", "(", ")", "if", "self", ".", "_...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
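A checkpoint/resume sketch combining the two methods above with Gluon parameter saving; the file names are placeholders.

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(1, in_units=8)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 1e-3})

# ... training loop runs here ...
net.save_parameters('model.params')     # weights
trainer.save_states('trainer.states')   # optimizer state (e.g. Adam moments)

# Later, after rebuilding net and trainer the same way, restore both pieces.
net.load_parameters('model.params')
trainer.load_states('trainer.states')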
estimate_density
Sample the file 10 times, about 1000 lines per sample, to estimate the density of the sparse dataset
benchmark/python/sparse/util.py
def estimate_density(DATA_PATH, feature_size):
    """Sample the file 10 times, about 1000 lines per sample, to estimate
    the density of the sparse dataset"""
    if not os.path.exists(DATA_PATH):
        raise Exception("Data is not there!")
    density = []
    P = 0.01
    for _ in range(10):
        num_non_zero = 0
        num_sample = 0
        with open(DATA_PATH) as f:
            for line in f:
                if (random.random() < P):
                    num_non_zero += len(line.split(" ")) - 1
                    num_sample += 1
        density.append(num_non_zero * 1.0 / (feature_size * num_sample))
    return sum(density) / len(density)
def estimate_density(DATA_PATH, feature_size):
    """Sample the file 10 times, about 1000 lines per sample, to estimate
    the density of the sparse dataset"""
    if not os.path.exists(DATA_PATH):
        raise Exception("Data is not there!")
    density = []
    P = 0.01
    for _ in range(10):
        num_non_zero = 0
        num_sample = 0
        with open(DATA_PATH) as f:
            for line in f:
                if (random.random() < P):
                    num_non_zero += len(line.split(" ")) - 1
                    num_sample += 1
        density.append(num_non_zero * 1.0 / (feature_size * num_sample))
    return sum(density) / len(density)
[ "sample", "10", "times", "of", "a", "size", "of", "1000", "for", "estimating", "the", "density", "of", "the", "sparse", "dataset" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/benchmark/python/sparse/util.py#L21-L36
[ "def", "estimate_density", "(", "DATA_PATH", ",", "feature_size", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "DATA_PATH", ")", ":", "raise", "Exception", "(", "\"Data is not there!\"", ")", "density", "=", "[", "]", "P", "=", "0.01", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
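A hypothetical call; the file name is a placeholder for a LibSVM-format dataset whose feature dimension is known in advance.

density = estimate_density('train.libsvm', feature_size=1000000)
print('approximate density: %f' % density)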
exec_cmd
Execute the command line command.
example/reinforcement-learning/a3c/launcher.py
def exec_cmd(cmd, role, taskid, pass_env): """Execute the command line command.""" if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt': cmd[0] = './' + cmd[0] cmd = ' '.join(cmd) env = os.environ.copy() for k, v in pass_env.items(): env[k] = str(v) env['DMLC_TASK_ID'] = str(taskid) env['DMLC_ROLE'] = role env['DMLC_JOB_CLUSTER'] = 'local' ntrial = 0 while True: if os.name == 'nt': env['DMLC_NUM_ATTEMPT'] = str(ntrial) ret = subprocess.call(cmd, shell=True, env=env) if ret != 0: ntrial += 1 continue else: bash = cmd ret = subprocess.call(bash, shell=True, executable='bash', env=env) if ret == 0: logging.debug('Thread %d exit with 0', taskid) return else: if os.name == 'nt': sys.exit(-1) else: raise RuntimeError('Get nonzero return code=%d' % ret)
def exec_cmd(cmd, role, taskid, pass_env): """Execute the command line command.""" if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt': cmd[0] = './' + cmd[0] cmd = ' '.join(cmd) env = os.environ.copy() for k, v in pass_env.items(): env[k] = str(v) env['DMLC_TASK_ID'] = str(taskid) env['DMLC_ROLE'] = role env['DMLC_JOB_CLUSTER'] = 'local' ntrial = 0 while True: if os.name == 'nt': env['DMLC_NUM_ATTEMPT'] = str(ntrial) ret = subprocess.call(cmd, shell=True, env=env) if ret != 0: ntrial += 1 continue else: bash = cmd ret = subprocess.call(bash, shell=True, executable='bash', env=env) if ret == 0: logging.debug('Thread %d exit with 0', taskid) return else: if os.name == 'nt': sys.exit(-1) else: raise RuntimeError('Get nonzero return code=%d' % ret)
[ "Execute", "the", "command", "line", "command", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/a3c/launcher.py#L46-L77
[ "def", "exec_cmd", "(", "cmd", ",", "role", ",", "taskid", ",", "pass_env", ")", ":", "if", "cmd", "[", "0", "]", ".", "find", "(", "'/'", ")", "==", "-", "1", "and", "os", ".", "path", ".", "exists", "(", "cmd", "[", "0", "]", ")", "and", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
submit
Submit function of local jobs.
example/reinforcement-learning/a3c/launcher.py
def submit(args):
    """Submit function of local jobs."""
    gpus = args.gpus.strip().split(',')

    def mthread_submit(nworker, nserver, envs):
        """
        Customized submit function that starts nworker worker jobs and nserver
        server jobs, each receiving the command-line args as parameters; note
        this can be a lambda function containing additional parameters in input.

        Parameters
        ----------
        nworker: number of worker processes to start up
        nserver: number of server nodes to start up
        envs: environment variables to be added to the starting programs
        """
        procs = {}
        for i, gpu in enumerate(gpus):
            for j in range(args.num_threads):
                taskid = i * args.num_threads + j   # unique key per worker thread
                procs[taskid] = Thread(target=exec_cmd,
                                       args=(args.command + ['--gpus=%s' % gpu],
                                             'worker', taskid, envs))
                procs[taskid].setDaemon(True)
                procs[taskid].start()
        for i in range(len(gpus) * args.num_threads, len(gpus) * args.num_threads + nserver):
            procs[i] = Thread(target=exec_cmd, args=(args.command, 'server', i, envs))
            procs[i].setDaemon(True)
            procs[i].start()

    # call submit, with nslave, the commands to run each job and submit function
    tracker.submit(args.num_threads * len(gpus), args.num_servers,
                   fun_submit=mthread_submit, pscmd=(' '.join(args.command)))
def submit(args):
    """Submit function of local jobs."""
    gpus = args.gpus.strip().split(',')

    def mthread_submit(nworker, nserver, envs):
        """
        Customized submit function that starts nworker worker jobs and nserver
        server jobs, each receiving the command-line args as parameters; note
        this can be a lambda function containing additional parameters in input.

        Parameters
        ----------
        nworker: number of worker processes to start up
        nserver: number of server nodes to start up
        envs: environment variables to be added to the starting programs
        """
        procs = {}
        for i, gpu in enumerate(gpus):
            for j in range(args.num_threads):
                taskid = i * args.num_threads + j   # unique key per worker thread
                procs[taskid] = Thread(target=exec_cmd,
                                       args=(args.command + ['--gpus=%s' % gpu],
                                             'worker', taskid, envs))
                procs[taskid].setDaemon(True)
                procs[taskid].start()
        for i in range(len(gpus) * args.num_threads, len(gpus) * args.num_threads + nserver):
            procs[i] = Thread(target=exec_cmd, args=(args.command, 'server', i, envs))
            procs[i].setDaemon(True)
            procs[i].start()

    # call submit, with nslave, the commands to run each job and submit function
    tracker.submit(args.num_threads * len(gpus), args.num_servers,
                   fun_submit=mthread_submit, pscmd=(' '.join(args.command)))
[ "Submit", "function", "of", "local", "jobs", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/a3c/launcher.py#L79-L106
[ "def", "submit", "(", "args", ")", ":", "gpus", "=", "args", ".", "gpus", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "def", "mthread_submit", "(", "nworker", ",", "nserver", ",", "envs", ")", ":", "\"\"\"\n customized submit script, that...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
CtcMetrics.ctc_label
Iterates through p, identifying non-zero and non-repeating values, and returns them in a list Parameters ---------- p: list of int Returns ------- list of int
example/ctc/ctc_metrics.py
def ctc_label(p): """Iterates through p, identifying non-zero and non-repeating values, and returns them in a list Parameters ---------- p: list of int Returns ------- list of int """ ret = [] p1 = [0] + p for i, _ in enumerate(p): c1 = p1[i] c2 = p1[i+1] if c2 in (0, c1): continue ret.append(c2) return ret
def ctc_label(p): """Iterates through p, identifying non-zero and non-repeating values, and returns them in a list Parameters ---------- p: list of int Returns ------- list of int """ ret = [] p1 = [0] + p for i, _ in enumerate(p): c1 = p1[i] c2 = p1[i+1] if c2 in (0, c1): continue ret.append(c2) return ret
[ "Iterates", "through", "p", "identifying", "non", "-", "zero", "and", "non", "-", "repeating", "values", "and", "returns", "them", "in", "a", "list", "Parameters", "----------", "p", ":", "list", "of", "int" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ctc_metrics.py#L34-L51
[ "def", "ctc_label", "(", "p", ")", ":", "ret", "=", "[", "]", "p1", "=", "[", "0", "]", "+", "p", "for", "i", ",", "_", "in", "enumerate", "(", "p", ")", ":", "c1", "=", "p1", "[", "i", "]", "c2", "=", "p1", "[", "i", "+", "1", "]", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
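A small worked example of the CTC collapse rule implemented above (assuming `ctc_metrics.py` is importable): blanks (0) are dropped and consecutive repeats are merged.

from ctc_metrics import CtcMetrics  # assumes example/ctc is on the path

print(CtcMetrics.ctc_label([0, 2, 2, 0, 3, 3, 3, 0, 2]))  # [2, 3, 2]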
CtcMetrics._remove_blank
Removes trailing zeros in the list of integers and returns a new list of integers
example/ctc/ctc_metrics.py
def _remove_blank(l): """ Removes trailing zeros in the list of integers and returns a new list of integers""" ret = [] for i, _ in enumerate(l): if l[i] == 0: break ret.append(l[i]) return ret
def _remove_blank(l): """ Removes trailing zeros in the list of integers and returns a new list of integers""" ret = [] for i, _ in enumerate(l): if l[i] == 0: break ret.append(l[i]) return ret
[ "Removes", "trailing", "zeros", "in", "the", "list", "of", "integers", "and", "returns", "a", "new", "list", "of", "integers" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ctc_metrics.py#L54-L61
[ "def", "_remove_blank", "(", "l", ")", ":", "ret", "=", "[", "]", "for", "i", ",", "_", "in", "enumerate", "(", "l", ")", ":", "if", "l", "[", "i", "]", "==", "0", ":", "break", "ret", ".", "append", "(", "l", "[", "i", "]", ")", "return", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
CtcMetrics._lcs
Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length
example/ctc/ctc_metrics.py
def _lcs(p, l):
    """ Calculates the Longest Common Subsequence between p and l (both list of int) and
    returns its length"""
    # Dynamic programming: M[i, j] holds the LCS length of p[:i] and l[:j]
    if len(p) == 0:
        return 0
    M = np.zeros((len(p) + 1, len(l) + 1), dtype=np.int32)
    for i in range(1, M.shape[0]):
        for j in range(1, M.shape[1]):
            if p[i - 1] == l[j - 1]:
                M[i, j] = M[i - 1, j - 1] + 1
            else:
                M[i, j] = max(M[i - 1, j], M[i, j - 1])
    return int(M[-1, -1])
def _lcs(p, l):
    """ Calculates the Longest Common Subsequence between p and l (both list of int) and
    returns its length"""
    # Dynamic programming: M[i, j] holds the LCS length of p[:i] and l[:j]
    if len(p) == 0:
        return 0
    M = np.zeros((len(p) + 1, len(l) + 1), dtype=np.int32)
    for i in range(1, M.shape[0]):
        for j in range(1, M.shape[1]):
            if p[i - 1] == l[j - 1]:
                M[i, j] = M[i - 1, j - 1] + 1
            else:
                M[i, j] = max(M[i - 1, j], M[i, j - 1])
    return int(M[-1, -1])
[ "Calculates", "the", "Longest", "Common", "Subsequence", "between", "p", "and", "l", "(", "both", "list", "of", "int", ")", "and", "returns", "its", "length" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ctc_metrics.py#L64-L81
[ "def", "_lcs", "(", "p", ",", "l", ")", ":", "# Dynamic Programming Finding LCS", "if", "len", "(", "p", ")", "==", "0", ":", "return", "0", "P", "=", "np", ".", "array", "(", "list", "(", "p", ")", ")", ".", "reshape", "(", "(", "1", ",", "len...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
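A quick sanity check of the LCS helper (rewritten above to the standard dynamic program): the longest common subsequence of [1, 2, 3, 4] and [2, 4, 5] is [2, 4], so the length is 2.

from ctc_metrics import CtcMetrics  # assumes example/ctc is on the path

print(CtcMetrics._lcs([1, 2, 3, 4], [2, 4, 5]))  # 2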
CtcMetrics.accuracy
Simple accuracy measure: number of 100% accurate predictions divided by total number
example/ctc/ctc_metrics.py
def accuracy(self, label, pred): """ Simple accuracy measure: number of 100% accurate predictions divided by total number """ hit = 0. total = 0. batch_size = label.shape[0] for i in range(batch_size): l = self._remove_blank(label[i]) p = [] for k in range(self.seq_len): p.append(np.argmax(pred[k * batch_size + i])) p = self.ctc_label(p) if len(p) == len(l): match = True for k, _ in enumerate(p): if p[k] != int(l[k]): match = False break if match: hit += 1.0 total += 1.0 assert total == batch_size return hit / total
def accuracy(self, label, pred): """ Simple accuracy measure: number of 100% accurate predictions divided by total number """ hit = 0. total = 0. batch_size = label.shape[0] for i in range(batch_size): l = self._remove_blank(label[i]) p = [] for k in range(self.seq_len): p.append(np.argmax(pred[k * batch_size + i])) p = self.ctc_label(p) if len(p) == len(l): match = True for k, _ in enumerate(p): if p[k] != int(l[k]): match = False break if match: hit += 1.0 total += 1.0 assert total == batch_size return hit / total
[ "Simple", "accuracy", "measure", ":", "number", "of", "100%", "accurate", "predictions", "divided", "by", "total", "number" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ctc_metrics.py#L83-L104
[ "def", "accuracy", "(", "self", ",", "label", ",", "pred", ")", ":", "hit", "=", "0.", "total", "=", "0.", "batch_size", "=", "label", ".", "shape", "[", "0", "]", "for", "i", "in", "range", "(", "batch_size", ")", ":", "l", "=", "self", ".", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
CtcMetrics.accuracy_lcs
Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length
example/ctc/ctc_metrics.py
def accuracy_lcs(self, label, pred): """ Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length""" hit = 0. total = 0. batch_size = label.shape[0] for i in range(batch_size): l = self._remove_blank(label[i]) p = [] for k in range(self.seq_len): p.append(np.argmax(pred[k * batch_size + i])) p = self.ctc_label(p) hit += self._lcs(p, l) * 1.0 / len(l) total += 1.0 assert total == batch_size return hit / total
def accuracy_lcs(self, label, pred): """ Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length""" hit = 0. total = 0. batch_size = label.shape[0] for i in range(batch_size): l = self._remove_blank(label[i]) p = [] for k in range(self.seq_len): p.append(np.argmax(pred[k * batch_size + i])) p = self.ctc_label(p) hit += self._lcs(p, l) * 1.0 / len(l) total += 1.0 assert total == batch_size return hit / total
[ "Longest", "Common", "Subsequence", "accuracy", "measure", ":", "calculate", "accuracy", "of", "each", "prediction", "as", "LCS", "/", "length" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/ctc_metrics.py#L106-L120
[ "def", "accuracy_lcs", "(", "self", ",", "label", ",", "pred", ")", ":", "hit", "=", "0.", "total", "=", "0.", "batch_size", "=", "label", ".", "shape", "[", "0", "]", "for", "i", "in", "range", "(", "batch_size", ")", ":", "l", "=", "self", ".",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
get_movielens_iter
Not particularly fast code to parse the text file and load into NDArrays. Returns a single (prefetching) training data iterator.
example/sparse/matrix_factorization/data.py
def get_movielens_iter(filename, batch_size):
    """Not particularly fast code to parse the text file and load into NDArrays.
    Returns a single (prefetching) training data iterator.
    """
    logging.info("Preparing data iterators for " + filename + " ... ")
    user = []
    item = []
    score = []
    with open(filename, 'r') as f:
        num_samples = 0
        for line in f:
            tks = line.strip().split('::')
            if len(tks) != 4:
                continue
            num_samples += 1
            user.append((tks[0]))
            item.append((tks[1]))
            score.append((tks[2]))
    # convert to ndarrays
    user = mx.nd.array(user, dtype='int32')
    item = mx.nd.array(item)
    score = mx.nd.array(score)
    # prepare data iters
    data_train = {'user': user, 'item': item}
    label_train = {'score': score}
    iter_train = mx.io.NDArrayIter(data=data_train, label=label_train,
                                   batch_size=batch_size, shuffle=True)
    return mx.io.PrefetchingIter(iter_train)
def get_movielens_iter(filename, batch_size):
    """Not particularly fast code to parse the text file and load into NDArrays.
    Returns a single (prefetching) training data iterator.
    """
    logging.info("Preparing data iterators for " + filename + " ... ")
    user = []
    item = []
    score = []
    with open(filename, 'r') as f:
        num_samples = 0
        for line in f:
            tks = line.strip().split('::')
            if len(tks) != 4:
                continue
            num_samples += 1
            user.append((tks[0]))
            item.append((tks[1]))
            score.append((tks[2]))
    # convert to ndarrays
    user = mx.nd.array(user, dtype='int32')
    item = mx.nd.array(item)
    score = mx.nd.array(score)
    # prepare data iters
    data_train = {'user': user, 'item': item}
    label_train = {'score': score}
    iter_train = mx.io.NDArrayIter(data=data_train, label=label_train,
                                   batch_size=batch_size, shuffle=True)
    return mx.io.PrefetchingIter(iter_train)
[ "Not", "particularly", "fast", "code", "to", "parse", "the", "text", "file", "and", "load", "into", "NDArrays", ".", "return", "two", "data", "iters", "one", "for", "train", "the", "other", "for", "validation", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/matrix_factorization/data.py#L29-L56
[ "def", "get_movielens_iter", "(", "filename", ",", "batch_size", ")", ":", "logging", ".", "info", "(", "\"Preparing data iterators for \"", "+", "filename", "+", "\" ... \"", ")", "user", "=", "[", "]", "item", "=", "[", "]", "score", "=", "[", "]", "with...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
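A hypothetical call; the path is a placeholder for the MovieLens ratings file, whose lines look like 'userid::movieid::rating::timestamp'.

train_iter = get_movielens_iter('./ml-10M100K/ratings.dat', batch_size=128)
for batch in train_iter:
    print(batch.data[0].shape, batch.label[0].shape)  # (128,), (128,)
    break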
imdecode
Decode image from str buffer. Wrapper for cv2.imdecode that uses mx.nd.NDArray Parameters ---------- str_img : str str buffer read from image file flag : int same as flag for cv2.imdecode Returns ------- img : NDArray decoded image in (height, width, channels) with BGR color channel order
plugin/opencv/opencv.py
def imdecode(str_img, flag=1):
    """Decode image from str buffer.
    Wrapper for cv2.imdecode that uses mx.nd.NDArray

    Parameters
    ----------
    str_img : str
        str buffer read from image file
    flag : int
        same as flag for cv2.imdecode
    Returns
    -------
    img : NDArray
        decoded image in (height, width, channels)
        with BGR color channel order
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
                                 mx_uint(len(str_img)),
                                 flag, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)
def imdecode(str_img, flag=1):
    """Decode image from str buffer.
    Wrapper for cv2.imdecode that uses mx.nd.NDArray

    Parameters
    ----------
    str_img : str
        str buffer read from image file
    flag : int
        same as flag for cv2.imdecode
    Returns
    -------
    img : NDArray
        decoded image in (height, width, channels)
        with BGR color channel order
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
                                 mx_uint(len(str_img)),
                                 flag, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)
[ "Decode", "image", "from", "str", "buffer", ".", "Wrapper", "for", "cv2", ".", "imdecode", "that", "uses", "mx", ".", "nd", ".", "NDArray" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L29-L49
[ "def", "imdecode", "(", "str_img", ",", "flag", "=", "1", ")", ":", "hdl", "=", "NDArrayHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXCVImdecode", "(", "ctypes", ".", "c_char_p", "(", "str_img", ")", ",", "mx_uint", "(", "len", "(", "str_img", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
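A small usage sketch, assuming the plugin above is importable as opencv and that a JPEG named cat.jpg exists (both hypothetical):

import opencv  # the plugin module above; import path is an assumption

with open('cat.jpg', 'rb') as f:   # raw encoded bytes, not decoded pixels
    str_img = f.read()
img = opencv.imdecode(str_img, flag=1)  # flag=1 decodes as a 3-channel BGR image
print(img.shape)                        # a 3-channel NDArray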
train
resize
Decode image from str buffer. Wrapper for cv2.imresize that uses mx.nd.NDArray Parameters ---------- src : NDArray image in (width, height, channels) size : tuple target size in (width, height) interpolation : int same as interpolation for cv2.imresize Returns ------- img : NDArray resized image
plugin/opencv/opencv.py
def resize(src, size, interpolation=cv2.INTER_LINEAR): """Decode image from str buffer. Wrapper for cv2.imresize that uses mx.nd.NDArray Parameters ---------- src : NDArray image in (width, height, channels) size : tuple target size in (width, height) interpolation : int same as interpolation for cv2.imresize Returns ------- img : NDArray resized image """ hdl = NDArrayHandle() check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]), interpolation, ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
def resize(src, size, interpolation=cv2.INTER_LINEAR): """Decode image from str buffer. Wrapper for cv2.imresize that uses mx.nd.NDArray Parameters ---------- src : NDArray image in (width, height, channels) size : tuple target size in (width, height) interpolation : int same as interpolation for cv2.imresize Returns ------- img : NDArray resized image """ hdl = NDArrayHandle() check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]), interpolation, ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
[ "Decode", "image", "from", "str", "buffer", ".", "Wrapper", "for", "cv2", ".", "imresize", "that", "uses", "mx", ".", "nd", ".", "NDArray" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L51-L72
[ "def", "resize", "(", "src", ",", "size", ",", "interpolation", "=", "cv2", ".", "INTER_LINEAR", ")", ":", "hdl", "=", "NDArrayHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXCVResize", "(", "src", ".", "handle", ",", "mx_uint", "(", "size", "[",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
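A quick sketch of the wrapper above on a dummy NDArray image; the shapes are illustrative only:

import cv2
import mxnet as mx
import opencv  # hypothetical import of the plugin module above

src = mx.nd.zeros((480, 640, 3))  # dummy 3-channel image
dst = opencv.resize(src, (256, 256), interpolation=cv2.INTER_LINEAR)
print(dst.shape)  # (256, 256, 3)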
train
copyMakeBorder
Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Others are the same with cv2.copyMakeBorder Returns ------- img : NDArray padded image
plugin/opencv/opencv.py
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0): """Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Others are the same with cv2.copyMakeBorder Returns ------- img : NDArray padded image """ hdl = NDArrayHandle() check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot), ctypes.c_int(left), ctypes.c_int(right), ctypes.c_int(border_type), ctypes.c_double(value), ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0): """Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Others are the same with cv2.copyMakeBorder Returns ------- img : NDArray padded image """ hdl = NDArrayHandle() check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot), ctypes.c_int(left), ctypes.c_int(right), ctypes.c_int(border_type), ctypes.c_double(value), ctypes.byref(hdl))) return mx.nd.NDArray(hdl)
[ "Pad", "image", "border", "Wrapper", "for", "cv2", ".", "copyMakeBorder", "that", "uses", "mx", ".", "nd", ".", "NDArray" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L74-L94
[ "def", "copyMakeBorder", "(", "src", ",", "top", ",", "bot", ",", "left", ",", "right", ",", "border_type", "=", "cv2", ".", "BORDER_CONSTANT", ",", "value", "=", "0", ")", ":", "hdl", "=", "NDArrayHandle", "(", ")", "check_call", "(", "_LIB", ".", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
fixed_crop
Crop src at fixed location, and (optionally) resize it to size
plugin/opencv/opencv.py
def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC): """Crop src at fixed location, and (optionally) resize it to size""" out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2]))) if size is not None and (w, h) != size: out = resize(out, size, interpolation=interpolation) return out
def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC): """Crop src at fixed location, and (optionally) resize it to size""" out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2]))) if size is not None and (w, h) != size: out = resize(out, size, interpolation=interpolation) return out
[ "Crop", "src", "at", "fixed", "location", "and", "(", "optionally", ")", "resize", "it", "to", "size" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L107-L112
[ "def", "fixed_crop", "(", "src", ",", "x0", ",", "y0", ",", "w", ",", "h", ",", "size", "=", "None", ",", "interpolation", "=", "cv2", ".", "INTER_CUBIC", ")", ":", "out", "=", "mx", ".", "nd", ".", "crop", "(", "src", ",", "begin", "=", "(", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
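A sketch of the two modes of fixed_crop above (crop only, and crop followed by a resize); the coordinates are arbitrary:

import mxnet as mx
import opencv  # hypothetical import of the plugin module above

src = mx.nd.zeros((100, 100, 3))
crop = opencv.fixed_crop(src, x0=10, y0=20, w=50, h=40)                 # shape (40, 50, 3)
crop_resized = opencv.fixed_crop(src, 10, 20, 50, 40, size=(224, 224))  # cropped, then resized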
train
random_crop
Randomly crop src with size. Upsample result if src is smaller than size
plugin/opencv/opencv.py
def random_crop(src, size): """Randomly crop src with size. Upsample result if src is smaller than size""" h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h)
def random_crop(src, size): """Randomly crop src with size. Upsample result if src is smaller than size""" h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h)
[ "Randomly", "crop", "src", "with", "size", ".", "Upsample", "result", "if", "src", "is", "smaller", "than", "size" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L114-L123
[ "def", "random_crop", "(", "src", ",", "size", ")", ":", "h", ",", "w", ",", "_", "=", "src", ".", "shape", "new_w", ",", "new_h", "=", "scale_down", "(", "(", "w", ",", "h", ")", ",", "size", ")", "x0", "=", "random", ".", "randint", "(", "0...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
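random_crop above also returns where the crop was taken, which is useful when the same augmentation must be tracked or replayed; a sketch:

import mxnet as mx
import opencv  # hypothetical import of the plugin module above

src = mx.nd.zeros((480, 640, 3))
out, (x0, y0, w, h) = opencv.random_crop(src, (224, 224))
print(out.shape, (x0, y0, w, h))  # (224, 224, 3) plus the sampled crop box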
train
random_size_crop
Randomly crop src with size. Randomize area and aspect ratio
plugin/opencv/opencv.py
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)): """Randomly crop src with size. Randomize area and aspect ratio""" h, w, _ = src.shape area = w*h for _ in range(10): new_area = random.uniform(min_area, 1.0) * area new_ratio = random.uniform(*ratio) new_w = int(new_area*new_ratio) new_h = int(new_area/new_ratio) if random.uniform(0., 1.) < 0.5: new_w, new_h = new_h, new_w if new_w > w or new_h > h: continue x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h) return random_crop(src, size)
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)): """Randomly crop src with size. Randomize area and aspect ratio""" h, w, _ = src.shape area = w*h for _ in range(10): new_area = random.uniform(min_area, 1.0) * area new_ratio = random.uniform(*ratio) new_w = int(new_area*new_ratio) new_h = int(new_area/new_ratio) if random.uniform(0., 1.) < 0.5: new_w, new_h = new_h, new_w if new_w > w or new_h > h: continue x0 = random.randint(0, w - new_w) y0 = random.randint(0, h - new_h) out = fixed_crop(src, x0, y0, new_w, new_h, size) return out, (x0, y0, new_w, new_h) return random_crop(src, size)
[ "Randomly", "crop", "src", "with", "size", ".", "Randomize", "area", "and", "aspect", "ratio" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L131-L153
[ "def", "random_size_crop", "(", "src", ",", "size", ",", "min_area", "=", "0.25", ",", "ratio", "=", "(", "3.0", "/", "4.0", ",", "4.0", "/", "3.0", ")", ")", ":", "h", ",", "w", ",", "_", "=", "src", ".", "shape", "area", "=", "w", "*", "h",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
ImageListIter.next
Move iterator position forward
plugin/opencv/opencv.py
def next(self): """Move iterator position forward""" batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3)) i = self.cur for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)): str_img = open(self.root+self.list[i]+'.jpg').read() img = imdecode(str_img, 1) img, _ = random_crop(img, self.size) batch[i - self.cur] = img batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2)) ret = mx.io.DataBatch(data=[batch], label=[], pad=self.batch_size-(i-self.cur), index=None) self.cur = i return ret
def next(self): """Move iterator position forward""" batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3)) i = self.cur for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)): str_img = open(self.root+self.list[i]+'.jpg').read() img = imdecode(str_img, 1) img, _ = random_crop(img, self.size) batch[i - self.cur] = img batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2)) ret = mx.io.DataBatch(data=[batch], label=[], pad=self.batch_size-(i-self.cur), index=None) self.cur = i return ret
[ "Move", "iterator", "position", "forward" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/plugin/opencv/opencv.py#L173-L188
[ "def", "next", "(", "self", ")", ":", "batch", "=", "mx", ".", "nd", ".", "zeros", "(", "(", "self", ".", "batch_size", ",", "self", ".", "size", "[", "1", "]", ",", "self", ".", "size", "[", "0", "]", ",", "3", ")", ")", "i", "=", "self", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
check_label_shapes
Check to see if the two arrays are the same size.
example/speech_recognition/stt_metric.py
def check_label_shapes(labels, preds, shape=0): """Check to see if the two arrays are the same size.""" if shape == 0: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of " "predictions {}".format(label_shape, pred_shape))
def check_label_shapes(labels, preds, shape=0): """Check to see if the two arrays are the same size.""" if shape == 0: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of " "predictions {}".format(label_shape, pred_shape))
[ "Check", "to", "see", "if", "the", "two", "arrays", "are", "the", "same", "size", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/speech_recognition/stt_metric.py#L25-L35
[ "def", "check_label_shapes", "(", "labels", ",", "preds", ",", "shape", "=", "0", ")", ":", "if", "shape", "==", "0", ":", "label_shape", ",", "pred_shape", "=", "len", "(", "labels", ")", ",", "len", "(", "preds", ")", "else", ":", "label_shape", ",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
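A sketch of the helper's two comparison modes, assuming check_label_shapes above is in scope: with the default shape=0 only the list lengths are compared; any nonzero value compares .shape attributes instead.

import mxnet as mx

labels, preds = [mx.nd.ones((4,))], [mx.nd.zeros((4,))]
check_label_shapes(labels, preds)           # ok: both lists have length 1
check_label_shapes(labels[0], preds[0], 1)  # ok: both NDArrays are (4,)
# a mismatch in either mode raises ValueError("Shape of labels ... does not match ...")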
train
import_to_gluon
Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object. Parameters ---------- model_file : str ONNX model file name ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block : :class:`~mxnet.gluon.SymbolBlock` A SymbolBlock object representing the given model file. Notes ----- This method is available when you ``import mxnet.contrib.onnx``
python/mxnet/contrib/onnx/onnx2mx/import_to_gluon.py
def import_to_gluon(model_file, ctx): """ Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object. Parameters ---------- model_file : str ONNX model file name ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block : :class:`~mxnet.gluon.SymbolBlock` A SymbolBlock object representing the given model file. Notes ----- This method is available when you ``import mxnet.contrib.onnx`` """ graph = GraphProto() try: import onnx except ImportError: raise ImportError("Onnx and protobuf need to be installed. Instructions to" + " install - https://github.com/onnx/onnx#installation") model_proto = onnx.load_model(model_file) net = graph.graph_to_gluon(model_proto.graph, ctx) return net
def import_to_gluon(model_file, ctx): """ Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object. Parameters ---------- model_file : str ONNX model file name ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block : :class:`~mxnet.gluon.SymbolBlock` A SymbolBlock object representing the given model file. Notes ----- This method is available when you ``import mxnet.contrib.onnx`` """ graph = GraphProto() try: import onnx except ImportError: raise ImportError("Onnx and protobuf need to be installed. Instructions to" + " install - https://github.com/onnx/onnx#installation") model_proto = onnx.load_model(model_file) net = graph.graph_to_gluon(model_proto.graph, ctx) return net
[ "Imports", "the", "ONNX", "model", "files", "passed", "as", "a", "parameter", "into", "Gluon", "SymbolBlock", "object", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_to_gluon.py#L24-L53
[ "def", "import_to_gluon", "(", "model_file", ",", "ctx", ")", ":", "graph", "=", "GraphProto", "(", ")", "try", ":", "import", "onnx", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. Instructions to\"", "+", "\" ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
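A usage sketch for the importer above; the file name and input shape are hypothetical and depend entirely on the exported model:

import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet

net = onnx_mxnet.import_to_gluon('resnet18.onnx', ctx=mx.cpu())  # hypothetical model file
out = net(mx.nd.random.uniform(shape=(1, 3, 224, 224)))          # shape must match the model's input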
train
get_model
Model initialization.
example/gluon/image_classification.py
def get_model(model, ctx, opt): """Model initialization.""" kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes} if model.startswith('resnet'): kwargs['thumbnail'] = opt.use_thumbnail elif model.startswith('vgg'): kwargs['batch_norm'] = opt.batch_norm net = models.get_model(model, **kwargs) if opt.resume: net.load_parameters(opt.resume) elif not opt.use_pretrained: if model in ['alexnet']: net.initialize(mx.init.Normal()) else: net.initialize(mx.init.Xavier(magnitude=2)) net.cast(opt.dtype) return net
def get_model(model, ctx, opt): """Model initialization.""" kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes} if model.startswith('resnet'): kwargs['thumbnail'] = opt.use_thumbnail elif model.startswith('vgg'): kwargs['batch_norm'] = opt.batch_norm net = models.get_model(model, **kwargs) if opt.resume: net.load_parameters(opt.resume) elif not opt.use_pretrained: if model in ['alexnet']: net.initialize(mx.init.Normal()) else: net.initialize(mx.init.Xavier(magnitude=2)) net.cast(opt.dtype) return net
[ "Model", "initialization", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/image_classification.py#L117-L134
[ "def", "get_model", "(", "model", ",", "ctx", ",", "opt", ")", ":", "kwargs", "=", "{", "'ctx'", ":", "ctx", ",", "'pretrained'", ":", "opt", ".", "use_pretrained", ",", "'classes'", ":", "classes", "}", "if", "model", ".", "startswith", "(", "'resnet'...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
get_data_iters
get dataset iterators
example/gluon/image_classification.py
def get_data_iters(dataset, batch_size, opt): """get dataset iterators""" if dataset == 'mnist': train_data, val_data = get_mnist_iterator(batch_size, (1, 28, 28), num_parts=kv.num_workers, part_index=kv.rank) elif dataset == 'cifar10': train_data, val_data = get_cifar10_iterator(batch_size, (3, 32, 32), num_parts=kv.num_workers, part_index=kv.rank) elif dataset == 'imagenet': shape_dim = 299 if model_name == 'inceptionv3' else 224 if not opt.data_dir: raise ValueError('Dir containing raw images in train/val is required for imagenet.' 'Please specify "--data-dir"') train_data, val_data = get_imagenet_iterator(opt.data_dir, batch_size, opt.num_workers, shape_dim, opt.dtype) elif dataset == 'caltech101': train_data, val_data = get_caltech101_iterator(batch_size, opt.num_workers, opt.dtype) elif dataset == 'dummy': shape_dim = 299 if model_name == 'inceptionv3' else 224 train_data, val_data = dummy_iterator(batch_size, (3, shape_dim, shape_dim)) return train_data, val_data
def get_data_iters(dataset, batch_size, opt): """get dataset iterators""" if dataset == 'mnist': train_data, val_data = get_mnist_iterator(batch_size, (1, 28, 28), num_parts=kv.num_workers, part_index=kv.rank) elif dataset == 'cifar10': train_data, val_data = get_cifar10_iterator(batch_size, (3, 32, 32), num_parts=kv.num_workers, part_index=kv.rank) elif dataset == 'imagenet': shape_dim = 299 if model_name == 'inceptionv3' else 224 if not opt.data_dir: raise ValueError('Dir containing raw images in train/val is required for imagenet.' 'Please specify "--data-dir"') train_data, val_data = get_imagenet_iterator(opt.data_dir, batch_size, opt.num_workers, shape_dim, opt.dtype) elif dataset == 'caltech101': train_data, val_data = get_caltech101_iterator(batch_size, opt.num_workers, opt.dtype) elif dataset == 'dummy': shape_dim = 299 if model_name == 'inceptionv3' else 224 train_data, val_data = dummy_iterator(batch_size, (3, shape_dim, shape_dim)) return train_data, val_data
[ "get", "dataset", "iterators" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/image_classification.py#L138-L160
[ "def", "get_data_iters", "(", "dataset", ",", "batch_size", ",", "opt", ")", ":", "if", "dataset", "==", "'mnist'", ":", "train_data", ",", "val_data", "=", "get_mnist_iterator", "(", "batch_size", ",", "(", "1", ",", "28", ",", "28", ")", ",", "num_part...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
update_learning_rate
Set the learning rate to the initial value decayed by ratio at every milestone epoch listed in steps.
example/gluon/image_classification.py
def update_learning_rate(lr, trainer, epoch, ratio, steps): """Set the learning rate to the initial value decayed by ratio at every milestone epoch listed in steps.""" new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch))) trainer.set_learning_rate(new_lr) return trainer
def update_learning_rate(lr, trainer, epoch, ratio, steps): """Set the learning rate to the initial value decayed by ratio at every milestone epoch listed in steps.""" new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch))) trainer.set_learning_rate(new_lr) return trainer
[ "Set", "the", "learning", "rate", "to", "the", "initial", "value", "decayed", "by", "ratio", "at", "every", "milestone", "epoch", "listed", "in", "steps", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/image_classification.py#L174-L178
[ "def", "update_learning_rate", "(", "lr", ",", "trainer", ",", "epoch", ",", "ratio", ",", "steps", ")", ":", "new_lr", "=", "lr", "*", "(", "ratio", "**", "int", "(", "np", ".", "sum", "(", "np", ".", "array", "(", "steps", ")", "<", "epoch", ")...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
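The decay count is simply how many milestones in steps the current epoch has strictly passed; a worked check of the formula above:

import numpy as np

lr, ratio, steps = 0.1, 0.1, [30, 60, 90]
for epoch in (10, 30, 31, 95):
    decays = int(np.sum(np.array(steps) < epoch))
    print(epoch, lr * ratio ** decays)
# 10 -> 0.1, 30 -> 0.1 (strict '<'), 31 -> 0.01, 95 -> 0.0001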
train
seed
Seeds the random number generators in MXNet. This affects the behavior of modules in MXNet that uses random number generators, like the dropout operator and `NDArray`'s random sampling operators. Parameters ---------- seed_state : int The random number seed. ctx : Context The device context of the generator. The default is "all" which means seeding random number generators of all devices. Notes ----- Random number generators in MXNet are device specific. `mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the device id. Therefore, random numbers generated from different devices can be different even if they are seeded using the same seed. To produce identical random number sequences independent of the device id, set optional `ctx` argument. This produces the same sequence of random numbers independent of the device id, but the sequence can be different on different kind of devices as MXNet's random number generators for CPU and GPU use different algorithms. Example ------- >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.36481571 -0.62203991] [-1.4962182 -0.08511394]] >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.09544981 -0.20014545] [-0.20808885 0.2527658 ]] # Same results on the same device with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] # Different results on gpu(0) and gpu(1) with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 0.24336822 -1.664805 ] [-1.0223296 1.253198 ]] # Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1) >>> mx.random.seed(128, ctx=mx.gpu(0)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128, ctx=mx.gpu(1)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]]
python/mxnet/random.py
def seed(seed_state, ctx="all"): """Seeds the random number generators in MXNet. This affects the behavior of modules in MXNet that uses random number generators, like the dropout operator and `NDArray`'s random sampling operators. Parameters ---------- seed_state : int The random number seed. ctx : Context The device context of the generator. The default is "all" which means seeding random number generators of all devices. Notes ----- Random number generators in MXNet are device specific. `mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the device id. Therefore, random numbers generated from different devices can be different even if they are seeded using the same seed. To produce identical random number sequences independent of the device id, set optional `ctx` argument. This produces the same sequence of random numbers independent of the device id, but the sequence can be different on different kind of devices as MXNet's random number generators for CPU and GPU use different algorithms. Example ------- >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.36481571 -0.62203991] [-1.4962182 -0.08511394]] >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.09544981 -0.20014545] [-0.20808885 0.2527658 ]] # Same results on the same device with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] # Different results on gpu(0) and gpu(1) with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 0.24336822 -1.664805 ] [-1.0223296 1.253198 ]] # Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1) >>> mx.random.seed(128, ctx=mx.gpu(0)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128, ctx=mx.gpu(1)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] """ if not isinstance(seed_state, integer_types): raise ValueError('seed_state must be int') seed_state = ctypes.c_int(int(seed_state)) if ctx == "all": check_call(_LIB.MXRandomSeed(seed_state)) else: ctx = Context(ctx) check_call(_LIB.MXRandomSeedContext(seed_state, ctx.device_typeid, ctx.device_id))
def seed(seed_state, ctx="all"): """Seeds the random number generators in MXNet. This affects the behavior of modules in MXNet that uses random number generators, like the dropout operator and `NDArray`'s random sampling operators. Parameters ---------- seed_state : int The random number seed. ctx : Context The device context of the generator. The default is "all" which means seeding random number generators of all devices. Notes ----- Random number generators in MXNet are device specific. `mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the device id. Therefore, random numbers generated from different devices can be different even if they are seeded using the same seed. To produce identical random number sequences independent of the device id, set optional `ctx` argument. This produces the same sequence of random numbers independent of the device id, but the sequence can be different on different kind of devices as MXNet's random number generators for CPU and GPU use different algorithms. Example ------- >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.36481571 -0.62203991] [-1.4962182 -0.08511394]] >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 1.09544981 -0.20014545] [-0.20808885 0.2527658 ]] # Same results on the same device with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2)).asnumpy()) [[ 0.47400656 -0.75213492] [ 0.20251541 0.95352972]] # Different results on gpu(0) and gpu(1) with the same seed >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 0.24336822 -1.664805 ] [-1.0223296 1.253198 ]] # Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1) >>> mx.random.seed(128, ctx=mx.gpu(0)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] >>> mx.random.seed(128, ctx=mx.gpu(1)) >>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy()) [[ 2.5020072 -1.6884501] [-0.7931333 -1.4218881]] """ if not isinstance(seed_state, integer_types): raise ValueError('seed_state must be int') seed_state = ctypes.c_int(int(seed_state)) if ctx == "all": check_call(_LIB.MXRandomSeed(seed_state)) else: ctx = Context(ctx) check_call(_LIB.MXRandomSeedContext(seed_state, ctx.device_typeid, ctx.device_id))
[ "Seeds", "the", "random", "number", "generators", "in", "MXNet", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/random.py#L30-L100
[ "def", "seed", "(", "seed_state", ",", "ctx", "=", "\"all\"", ")", ":", "if", "not", "isinstance", "(", "seed_state", ",", "integer_types", ")", ":", "raise", "ValueError", "(", "'seed_state must be int'", ")", "seed_state", "=", "ctypes", ".", "c_int", "(",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
random_uniform
Draw random samples from a uniform distribution.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))] return 'random_uniform', new_attrs, inputs
def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))] return 'random_uniform', new_attrs, inputs
[ "Draw", "random", "samples", "from", "a", "uniform", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L30-L39
[ "def", "random_uniform", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "try", ":", "from", "onnx", ".", "mapping", "import", "TENSOR_TYPE_TO_NP_TYPE", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. \"",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
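The dtype attribute here is an ONNX TensorProto enum value; the mapping used above turns it into a NumPy dtype, with 1 (FLOAT) as the fallback:

from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

print(TENSOR_TYPE_TO_NP_TYPE[1])  # float32 -- the default when 'dtype' is absent
print(TENSOR_TYPE_TO_NP_TYPE[7])  # int64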
train
random_normal
Draw random samples from a Gaussian distribution.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def random_normal(attrs, inputs, proto_obj): """Draw random samples from a Gaussian distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attr = translation_utils._remove_attributes(attrs, ['seed']) new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'}) new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))] return 'random_normal', new_attr, inputs
def random_normal(attrs, inputs, proto_obj): """Draw random samples from a Gaussian distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attr = translation_utils._remove_attributes(attrs, ['seed']) new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'}) new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))] return 'random_normal', new_attr, inputs
[ "Draw", "random", "samples", "from", "a", "Gaussian", "distribution", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L41-L51
[ "def", "random_normal", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "try", ":", "from", "onnx", ".", "mapping", "import", "TENSOR_TYPE_TO_NP_TYPE", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. \"", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
add
Adding two tensors
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def add(attrs, inputs, proto_obj): """Adding two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_add', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_add', new_attr, inputs
def add(attrs, inputs, proto_obj): """Adding two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_add', inputs, broadcast_axis, proto_obj) return op_value, new_attr, inputs return 'broadcast_add', new_attr, inputs
[ "Adding", "two", "tensors" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L66-L75
[ "def", "add", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attr", "=", "{", "}", "if", "'broadcast'", "in", "attrs", "and", "attrs", "[", "'broadcast'", "]", "==", "1", ":", "broadcast_axis", "=", "attrs", "[", "'axis'", "]", "op_value...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
mean
Mean of all the input tensors.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def mean(attrs, inputs, proto_obj): """Mean of all the input tensors.""" concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs] concat_sym = symbol.concat(*concat_input, dim=0) mean_sym = symbol.mean(concat_sym, axis=0) return mean_sym, attrs, inputs
def mean(attrs, inputs, proto_obj): """Mean of all the input tensors.""" concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs] concat_sym = symbol.concat(*concat_input, dim=0) mean_sym = symbol.mean(concat_sym, axis=0) return mean_sym, attrs, inputs
[ "Mean", "of", "all", "the", "input", "tensors", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L110-L115
[ "def", "mean", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "concat_input", "=", "[", "symbol", ".", "expand_dims", "(", "op_input", ",", "axis", "=", "0", ")", "for", "op_input", "in", "inputs", "]", "concat_sym", "=", "symbol", ".", "conc...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
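The translation above emulates ONNX Mean by stacking the inputs along a new axis and averaging over it; the same pattern, written with NDArrays instead of symbols:

import mxnet as mx

a, b, c = mx.nd.array([1, 2]), mx.nd.array([3, 4]), mx.nd.array([5, 6])
stacked = mx.nd.concat(*[t.expand_dims(axis=0) for t in (a, b, c)], dim=0)  # shape (3, 2)
print(mx.nd.mean(stacked, axis=0).asnumpy())  # [3. 4.]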
train
argmax
Returns indices of the maximum values along an axis
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def argmax(attrs, inputs, proto_obj): """Returns indices of the maximum values along an axis""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmax_op
def argmax(attrs, inputs, proto_obj): """Returns indices of the maximum values along an axis""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmax_op
[ "Returns", "indices", "of", "the", "maximum", "values", "along", "an", "axis" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L146-L153
[ "def", "argmax", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "axis", "=", "attrs", ".", "get", "(", "'axis'", ",", "0", ")", "keepdims", "=", "attrs", ".", "get", "(", "'keepdims'", ",", "1", ")", "argmax_op", "=", "symbol", ".", "argm...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
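MXNet's argmax emits floating-point indices, which is why the translation above appends a cast; an NDArray analogue of the two-step result:

import mxnet as mx

x = mx.nd.array([[1, 3, 2]])
idx = mx.nd.argmax(x, axis=1, keepdims=True)  # float32 indices
print(mx.nd.cast(idx, dtype='int64'))          # int64, matching the ONNX contract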
train
argmin
Returns indices of the minimum values along an axis.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def argmin(attrs, inputs, proto_obj): """Returns indices of the minimum values along an axis.""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims) # onnx argmin operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmin_op
def argmin(attrs, inputs, proto_obj): """Returns indices of the minimum values along an axis.""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims) # onnx argmin operator always expects int64 as output type cast_attrs = {'dtype': 'int64'} return 'cast', cast_attrs, argmin_op
[ "Returns", "indices", "of", "the", "minimum", "values", "along", "an", "axis", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L155-L162
[ "def", "argmin", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "axis", "=", "attrs", ".", "get", "(", "'axis'", ",", "0", ")", "keepdims", "=", "attrs", ".", "get", "(", "'keepdims'", ",", "1", ")", "argmin_op", "=", "symbol", ".", "argm...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
maximum
Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time, while ONNX can send more than two, so the computation is broken into multiple MXNet ops that compare two symbols at a time.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def maximum(attrs, inputs, proto_obj): """ Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time, while ONNX can send more than two, so the computation is broken into multiple MXNet ops that compare two symbols at a time. """ if len(inputs) > 1: mxnet_op = symbol.maximum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.maximum(mxnet_op, op_input) else: mxnet_op = symbol.maximum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs
def maximum(attrs, inputs, proto_obj): """ Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time, while ONNX can send more than two, so the computation is broken into multiple MXNet ops that compare two symbols at a time. """ if len(inputs) > 1: mxnet_op = symbol.maximum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.maximum(mxnet_op, op_input) else: mxnet_op = symbol.maximum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs
[ "Elementwise", "maximum", "of", "arrays", ".", "MXNet", "maximum", "compares", "only", "two", "symbols", "at", "a", "time", ".", "ONNX", "can", "send", "more", "than", "two", "to", "compare", ".", "Breaking", "into", "multiple", "mxnet", "ops", "to", "comp...
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L164-L177
[ "def", "maximum", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "len", "(", "inputs", ")", ">", "1", ":", "mxnet_op", "=", "symbol", ".", "maximum", "(", "inputs", "[", "0", "]", ",", "inputs", "[", "1", "]", ")", "for", "op_inpu...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
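The loop above is just a left fold of the binary maximum over the input list; the same idea written with functools.reduce on NDArrays:

import functools
import mxnet as mx

tensors = [mx.nd.array([1, 5]), mx.nd.array([4, 2]), mx.nd.array([3, 3])]
print(functools.reduce(mx.nd.maximum, tensors).asnumpy())  # [4. 5.]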
train
minimum
Elementwise minimum of arrays.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def minimum(attrs, inputs, proto_obj): """Elementwise minimum of arrays.""" # MXNet minimum compares only two symbols at a time. # ONNX can send more than two to compare. # Breaking into multiple mxnet ops to compare two symbols at a time if len(inputs) > 1: mxnet_op = symbol.minimum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.minimum(mxnet_op, op_input) else: mxnet_op = symbol.minimum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs
def minimum(attrs, inputs, proto_obj): """Elementwise minimum of arrays.""" # MXNet minimum compares only two symbols at a time. # ONNX can send more than two to compare. # Breaking into multiple mxnet ops to compare two symbols at a time if len(inputs) > 1: mxnet_op = symbol.minimum(inputs[0], inputs[1]) for op_input in inputs[2:]: mxnet_op = symbol.minimum(mxnet_op, op_input) else: mxnet_op = symbol.minimum(inputs[0], inputs[0]) return mxnet_op, attrs, inputs
[ "Elementwise", "minimum", "of", "arrays", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L179-L190
[ "def", "minimum", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "# MXNet minimum compares only two symbols at a time.", "# ONNX can send more than two to compare.", "# Breaking into multiple mxnet ops to compare two symbols at a time", "if", "len", "(", "inputs", ")", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
concat
Joins input arrays along a given axis.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def concat(attrs, inputs, proto_obj): """ Joins input arrays along a given axis. """ new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'}) return 'concat', new_attrs, inputs
def concat(attrs, inputs, proto_obj): """ Joins input arrays along a given axis. """ new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'}) return 'concat', new_attrs, inputs
[ "Joins", "input", "arrays", "along", "a", "given", "axis", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L219-L222
[ "def", "concat", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axis'", ":", "'dim'", "}", ")", "return", "'concat'", ",", "new_attrs", ",", "inputs" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
pad
Add padding to input tensor
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def pad(attrs, inputs, proto_obj): """ Add padding to input tensor""" new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width', 'value' : 'constant_value' }) new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width')) return 'pad', new_attrs, inputs
def pad(attrs, inputs, proto_obj): """ Add padding to input tensor""" new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width', 'value' : 'constant_value' }) new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width')) return 'pad', new_attrs, inputs
[ "Add", "padding", "to", "input", "tensor" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L241-L247
[ "def", "pad", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'pads'", ":", "'pad_width'", ",", "'value'", ":", "'constant_value'", "}", ")", "new_attrs", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
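MXNet's pad operator wants flat (before, after) pairs per axis, leading axes first, which is the layout _pad_sequence_fix produces; a sketch on a 4-D NDArray, where the leading (0, 0) pairs leave batch and channel untouched:

import mxnet as mx

x = mx.nd.ones((1, 1, 2, 2))
y = mx.nd.pad(x, mode='constant', constant_value=0,
              pad_width=(0, 0, 0, 0, 1, 1, 1, 1))  # pad H and W by 1 on each side
print(y.shape)  # (1, 1, 4, 4)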
train
batch_norm
Batch normalization.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def batch_norm(attrs, inputs, proto_obj): """Batch normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps', 'is_test': 'fix_gamma'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['spatial', 'consumed_inputs']) # Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5) cudnn_min_eps = 1e-5 cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1 new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off}) # in test mode "fix_gamma" should be unset. new_attrs['fix_gamma'] = not attrs.get('is_test', 1) return 'BatchNorm', new_attrs, inputs
def batch_norm(attrs, inputs, proto_obj): """Batch normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps', 'is_test': 'fix_gamma'}) new_attrs = translation_utils._remove_attributes(new_attrs, ['spatial', 'consumed_inputs']) # Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5) cudnn_min_eps = 1e-5 cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1 new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off}) # in test mode "fix_gamma" should be unset. new_attrs['fix_gamma'] = not attrs.get('is_test', 1) return 'BatchNorm', new_attrs, inputs
[ "Batch", "normalization", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L253-L266
[ "def", "batch_norm", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'epsilon'", ":", "'eps'", ",", "'is_test'", ":", "'fix_gamma'", "}", ")", "new_attrs", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
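A sketch of how the two derived attributes fall out of hypothetical ONNX attrs: an epsilon below cuDNN's 1e-5 floor forces cuDNN off, and test mode (is_test=1) unsets fix_gamma.

attrs = {'epsilon': 1e-6, 'is_test': 1}  # hypothetical ONNX attributes
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
fix_gamma = not attrs.get('is_test', 1)
print(cudnn_off, fix_gamma)  # 1 False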
train
instance_norm
Instance Normalization.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def instance_norm(attrs, inputs, proto_obj): """Instance Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'}) new_attrs['eps'] = attrs.get('epsilon', 1e-5) return 'InstanceNorm', new_attrs, inputs
def instance_norm(attrs, inputs, proto_obj): """Instance Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'}) new_attrs['eps'] = attrs.get('epsilon', 1e-5) return 'InstanceNorm', new_attrs, inputs
[ "Instance", "Normalization", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L268-L272
[ "def", "instance_norm", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'epsilon'", ":", "'eps'", "}", ")", "new_attrs", "[", "'eps'", "]", "=", "attrs", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
leaky_relu
Leaky Relu function
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def leaky_relu(attrs, inputs, proto_obj): """Leaky Relu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01}) return 'LeakyReLU', new_attrs, inputs
def leaky_relu(attrs, inputs, proto_obj): """Leaky Relu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01}) return 'LeakyReLU', new_attrs, inputs
[ "Leaky", "Relu", "function" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L274-L280
[ "def", "leaky_relu", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "'alpha'", "in", "attrs", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'alpha'", ":", "'slope'", "}", ")", "else", ":",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
_elu
Elu function
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def _elu(attrs, inputs, proto_obj): """Elu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'}) return 'LeakyReLU', new_attrs, inputs
def _elu(attrs, inputs, proto_obj): """Elu function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'}) return 'LeakyReLU', new_attrs, inputs
[ "Elu", "function" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L282-L289
[ "def", "_elu", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "'alpha'", "in", "attrs", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'alpha'", ":", "'slope'", "}", ")", "else", ":", "ne...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
_prelu
PRelu function
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def _prelu(attrs, inputs, proto_obj): """PRelu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'}) return 'LeakyReLU', new_attrs, inputs
def _prelu(attrs, inputs, proto_obj): """PRelu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'}) return 'LeakyReLU', new_attrs, inputs
[ "PRelu", "function" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L291-L294
[ "def", "_prelu", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'act_type'", ":", "'prelu'", "}", ")", "return", "'LeakyReLU'", ",", "new_attrs", ",", "in...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
_selu
Selu function
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def _selu(attrs, inputs, proto_obj): """Selu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'}) return 'LeakyReLU', new_attrs, inputs
def _selu(attrs, inputs, proto_obj): """Selu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'}) return 'LeakyReLU', new_attrs, inputs
[ "Selu", "function" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L296-L299
[ "def", "_selu", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'act_type'", ":", "'selu'", "}", ")", "return", "'LeakyReLU'", ",", "new_attrs", ",", "inpu...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
softmax
Softmax function.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def softmax(attrs, inputs, proto_obj): """Softmax function.""" if 'axis' not in attrs: attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1}) return 'softmax', attrs, inputs
def softmax(attrs, inputs, proto_obj): """Softmax function.""" if 'axis' not in attrs: attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1}) return 'softmax', attrs, inputs
[ "Softmax", "function", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L301-L305
[ "def", "softmax", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "'axis'", "not", "in", "attrs", ":", "attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'axis'", ":", "1", "}", ")", "return", "'softm...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
softplus
Applies the softplus activation function element-wise to the input.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def softplus(attrs, inputs, proto_obj): """Applies the softplus activation function element-wise to the input.""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'}) return 'Activation', new_attrs, inputs
def softplus(attrs, inputs, proto_obj): """Applies the softplus activation function element-wise to the input.""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'}) return 'Activation', new_attrs, inputs
[ "Applies", "the", "softplus", "activation", "function", "element", "-", "wise", "to", "the", "input", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L312-L315
[ "def", "softplus", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'act_type'", ":", "'softrelu'", "}", ")", "return", "'Activation'", ",", "new_attrs", ",",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
conv
Compute N-D convolution on (N+2)-D input.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def conv(attrs, inputs, proto_obj): """Compute N-D convolution on (N+2)-D input.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel', 'strides' : 'stride', 'pads': 'pad', 'dilations': 'dilate', 'group': 'num_group'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1}) new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj) kernel = new_attrs['kernel'] stride = new_attrs['stride'] if 'stride' in new_attrs else [] padding = new_attrs['pad'] if 'pad' in new_attrs else [] dilations = new_attrs['dilate'] if 'dilate' in new_attrs else [] num_filter = new_attrs['num_filter'] num_group = new_attrs['num_group'] no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0 bias = None if no_bias is True else inputs[2] # Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first # use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator. pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel)) pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width) conv_op = symbol.Convolution(pad_op, inputs[1], bias, kernel=kernel, stride=stride, dilate=dilations, num_filter=num_filter, num_group=num_group, no_bias=no_bias) return conv_op, new_attrs, inputs
def conv(attrs, inputs, proto_obj): """Compute N-D convolution on (N+2)-D input.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel', 'strides' : 'stride', 'pads': 'pad', 'dilations': 'dilate', 'group': 'num_group'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1}) new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs)) new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj) kernel = new_attrs['kernel'] stride = new_attrs['stride'] if 'stride' in new_attrs else [] padding = new_attrs['pad'] if 'pad' in new_attrs else [] dilations = new_attrs['dilate'] if 'dilate' in new_attrs else [] num_filter = new_attrs['num_filter'] num_group = new_attrs['num_group'] no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0 bias = None if no_bias is True else inputs[2] # Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first # use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator. pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel)) pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width) conv_op = symbol.Convolution(pad_op, inputs[1], bias, kernel=kernel, stride=stride, dilate=dilations, num_filter=num_filter, num_group=num_group, no_bias=no_bias) return conv_op, new_attrs, inputs
[ "Compute", "N", "-", "D", "convolution", "on", "(", "N", "+", "2", ")", "-", "D", "input", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L317-L346
[ "def", "conv", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'kernel_shape'", ":", "'kernel'", ",", "'strides'", ":", "'stride'", ",", "'pads'", ":", "'pa...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
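The pad-then-convolve trick from the translation above, as a standalone NDArray sketch with made-up shapes; an asymmetric (bottom/right only) pad is exactly what Convolution's symmetric pad argument cannot express:

import mxnet as mx

x = mx.nd.ones((1, 3, 5, 5))
w = mx.nd.ones((8, 3, 3, 3))
xp = mx.nd.pad(x, mode='constant', constant_value=0,
               pad_width=(0, 0, 0, 0, 0, 1, 0, 1))  # pad bottom and right by 1
y = mx.nd.Convolution(xp, w, no_bias=True, kernel=(3, 3), num_filter=8)
print(y.shape)  # (1, 8, 4, 4)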
train
deconv
Computes transposed convolution of the input tensor.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def deconv(attrs, inputs, proto_obj):
    """Computes transposed convolution of the input tensor."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))

    new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
    kernel = new_attrs['kernel']
    stride = new_attrs['stride'] if 'stride' in new_attrs else []
    padding = new_attrs['pad'] if 'pad' in new_attrs else []
    dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
    num_filter = new_attrs['num_filter']
    num_group = new_attrs['num_group']
    no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
    bias = None if no_bias is True else inputs[2]

    # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
    # use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
    pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
    pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)

    deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
                                     kernel=kernel, stride=stride, dilate=dilations,
                                     num_filter=num_filter, num_group=num_group, no_bias=no_bias)

    return deconv_op, new_attrs, inputs

def deconv(attrs, inputs, proto_obj):
    """Computes transposed convolution of the input tensor."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))

    new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
    kernel = new_attrs['kernel']
    stride = new_attrs['stride'] if 'stride' in new_attrs else []
    padding = new_attrs['pad'] if 'pad' in new_attrs else []
    dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
    num_filter = new_attrs['num_filter']
    num_group = new_attrs['num_group']
    no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
    bias = None if no_bias is True else inputs[2]

    # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
    # use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
    pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
    pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)

    deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
                                     kernel=kernel, stride=stride, dilate=dilations,
                                     num_filter=num_filter, num_group=num_group, no_bias=no_bias)

    return deconv_op, new_attrs, inputs
[ "Computes", "transposed", "convolution", "of", "the", "input", "tensor", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L348-L377
[ "def", "deconv", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'kernel_shape'", ":", "'kernel'", ",", "'strides'", ":", "'stride'", ",", "'pads'", ":", "'...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
fully_connected
Applies a linear transformation: Y = XW^T + b.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def fully_connected(attrs, inputs, proto_obj):
    """Applies a linear transformation: Y = XW^T + b."""
    new_attrs = translation_utils._remove_attributes(attrs, ['axis'])

    new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))

    new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)

    return 'FullyConnected', new_attrs, inputs

def fully_connected(attrs, inputs, proto_obj):
    """Applies a linear transformation: Y = XW^T + b."""
    new_attrs = translation_utils._remove_attributes(attrs, ['axis'])

    new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))

    new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)

    return 'FullyConnected', new_attrs, inputs
[ "Applies", "a", "linear", "transformation", ":", "Y", "=", "XWT", "+", "b", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L379-L387
[ "def", "fully_connected", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_remove_attributes", "(", "attrs", ",", "[", "'axis'", "]", ")", "new_attrs", "=", "translation_utils", ".", "_fix_bias", "(", "'Ful...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
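A minimal NumPy sketch of the Y = XW^T + b form named in the docstring (purely illustrative; the MXNet operator computes this internally):

import numpy as np

X = np.random.rand(4, 3)   # (batch, in_features)
W = np.random.rand(5, 3)   # (out_features, in_features), hence the transpose
b = np.random.rand(5)
Y = X @ W.T + b            # Y has shape (4, 5)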
train
global_maxpooling
Performs max pooling on the input.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def global_maxpooling(attrs, inputs, proto_obj):
    """Performs max pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'max'})
    return 'Pooling', new_attrs, inputs

def global_maxpooling(attrs, inputs, proto_obj):
    """Performs max pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'max'})
    return 'Pooling', new_attrs, inputs
[ "Performs", "max", "pooling", "on", "the", "input", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L390-L395
[ "def", "global_maxpooling", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'global_pool'", ":", "True", ",", "'kernel'", ":", "(", "1", ",", "1", ")", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
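For intuition, global max pooling reduces each channel's entire spatial extent to a single value, which is why a dummy `kernel` of (1, 1) suffices once `global_pool` is set. A NumPy sketch of the effect (illustrative, not the MXNet kernel):

import numpy as np

x = np.random.rand(2, 3, 8, 8)           # (N, C, H, W)
gmp = x.max(axis=(2, 3), keepdims=True)  # (N, C, 1, 1), as global pooling produces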
train
global_avgpooling
Performs avg pooling on the input.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def global_avgpooling(attrs, inputs, proto_obj):
    """Performs avg pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'avg'})
    return 'Pooling', new_attrs, inputs

def global_avgpooling(attrs, inputs, proto_obj):
    """Performs avg pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'avg'})
    return 'Pooling', new_attrs, inputs
[ "Performs", "avg", "pooling", "on", "the", "input", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L398-L403
[ "def", "global_avgpooling", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'global_pool'", ":", "True", ",", "'kernel'", ":", "(", "1", ",", "1", ")", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
global_lppooling
Performs global lp pooling on the input.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def global_lppooling(attrs, inputs, proto_obj):
    """Performs global lp pooling on the input."""
    p_value = attrs.get('p', 2)
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'lp',
                                                                'p_value': p_value})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
    return 'Pooling', new_attrs, inputs

def global_lppooling(attrs, inputs, proto_obj):
    """Performs global lp pooling on the input."""
    p_value = attrs.get('p', 2)
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'lp',
                                                                'p_value': p_value})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
    return 'Pooling', new_attrs, inputs
[ "Performs", "global", "lp", "pooling", "on", "the", "input", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L405-L413
[ "def", "global_lppooling", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "p_value", "=", "attrs", ".", "get", "(", "'p'", ",", "2", ")", "new_attrs", "=", "translation_utils", ".", "_add_extra_attributes", "(", "attrs", ",", "{", "'global_pool'", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
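Global Lp pooling generalizes max/avg pooling by reducing with the Lp norm, (sum |x|^p)^(1/p), over the spatial dimensions; a NumPy sketch using the same default p=2 as above (illustrative only):

import numpy as np

def global_lp_pool(x, p=2):
    # x: (N, C, H, W); Lp norm over the spatial dimensions per channel
    return ((np.abs(x) ** p).sum(axis=(2, 3), keepdims=True)) ** (1.0 / p)

x = np.random.rand(2, 3, 8, 8)
assert global_lp_pool(x, p=2).shape == (2, 3, 1, 1)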
train
linalg_gemm
Performs general matrix multiplication and accumulation
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def linalg_gemm(attrs, inputs, proto_obj):
    """Performs general matrix multiplication and accumulation"""
    trans_a = 0
    trans_b = 0
    alpha = 1
    beta = 1
    if 'transA' in attrs:
        trans_a = attrs['transA']
    if 'transB' in attrs:
        trans_b = attrs['transB']
    if 'alpha' in attrs:
        alpha = attrs['alpha']
    if 'beta' in attrs:
        beta = attrs['beta']
    flatten_a = symbol.flatten(inputs[0])
    matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
                                    transpose_a=trans_a, transpose_b=trans_b,
                                    alpha=alpha)
    gemm_op = symbol.broadcast_add(matmul_op, beta * inputs[2])
    new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
                                                               'transB': 'transpose_b'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
    return gemm_op, new_attrs, inputs

def linalg_gemm(attrs, inputs, proto_obj):
    """Performs general matrix multiplication and accumulation"""
    trans_a = 0
    trans_b = 0
    alpha = 1
    beta = 1
    if 'transA' in attrs:
        trans_a = attrs['transA']
    if 'transB' in attrs:
        trans_b = attrs['transB']
    if 'alpha' in attrs:
        alpha = attrs['alpha']
    if 'beta' in attrs:
        beta = attrs['beta']
    flatten_a = symbol.flatten(inputs[0])
    matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
                                    transpose_a=trans_a, transpose_b=trans_b,
                                    alpha=alpha)
    gemm_op = symbol.broadcast_add(matmul_op, beta * inputs[2])
    new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
                                                               'transB': 'transpose_b'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
    return gemm_op, new_attrs, inputs
[ "Performs", "general", "matrix", "multiplication", "and", "accumulation" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L415-L437
[ "def", "linalg_gemm", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "trans_a", "=", "0", "trans_b", "=", "0", "alpha", "=", "1", "beta", "=", "1", "if", "'transA'", "in", "attrs", ":", "trans_a", "=", "attrs", "[", "'transA'", "]", "if", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
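The ONNX Gemm contract being reproduced is Y = alpha * op(A) @ op(B) + beta * C, where op() optionally transposes its argument; a NumPy sketch of that contract (illustrative only):

import numpy as np

def gemm(A, B, C, alpha=1.0, beta=1.0, trans_a=False, trans_b=False):
    A = A.T if trans_a else A
    B = B.T if trans_b else B
    return alpha * (A @ B) + beta * C   # C broadcasts, as broadcast_add does above

Y = gemm(np.ones((2, 3)), np.ones((3, 4)), np.zeros(4), alpha=2.0)
assert Y.shape == (2, 4) and (Y == 6.0).all()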
train
local_response_norm
Local Response Normalization.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def local_response_norm(attrs, inputs, proto_obj):
    """Local Response Normalization."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'bias': 'knorm',
                                                        'size' : 'nsize'})
    return 'LRN', new_attrs, inputs

def local_response_norm(attrs, inputs, proto_obj):
    """Local Response Normalization."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'bias': 'knorm',
                                                        'size' : 'nsize'})
    return 'LRN', new_attrs, inputs
[ "Local", "Response", "Normalization", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L439-L444
[ "def", "local_response_norm", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'bias'", ":", "'knorm'", ",", "'size'", ":", "'nsize'", "}", ")", "return", "'...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
dropout
Dropout Regularization.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def dropout(attrs, inputs, proto_obj):
    """Dropout Regularization."""
    mode = 'training'
    if 'is_test' in attrs and attrs['is_test'] == 0:
        mode = 'always'
    new_attrs = translation_utils._fix_attribute_names(attrs, {'ratio': 'p'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
    return 'Dropout', new_attrs, inputs

def dropout(attrs, inputs, proto_obj):
    """Dropout Regularization."""
    mode = 'training'
    if 'is_test' in attrs and attrs['is_test'] == 0:
        mode = 'always'
    new_attrs = translation_utils._fix_attribute_names(attrs, {'ratio': 'p'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
    return 'Dropout', new_attrs, inputs
[ "Dropout", "Regularization", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L446-L455
[ "def", "dropout", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "mode", "=", "'training'", "if", "'is_test'", "in", "attrs", "and", "attrs", "[", "'is_test'", "]", "==", "0", ":", "mode", "=", "'always'", "new_attrs", "=", "translation_utils", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
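ONNX's legacy `is_test=0` attribute means dropout stays active even at inference, hence the mapping to MXNet's `mode='always'`; otherwise the default 'training' mode drops units only during training. For reference, a NumPy sketch of inverted dropout itself (illustrative, not MXNet's implementation):

import numpy as np

def inverted_dropout(x, p=0.5, seed=0):
    rng = np.random.default_rng(seed)
    mask = rng.random(x.shape) >= p      # keep each unit with probability 1 - p
    return x * mask / (1.0 - p)          # rescale so the expectation is unchanged

out = inverted_dropout(np.ones((2, 4)), p=0.5)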
train
reshape
Reshape the given array by the shape attribute.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def reshape(attrs, inputs, proto_obj):
    """Reshape the given array by the shape attribute."""
    if len(inputs) == 1:
        return 'reshape', attrs, inputs[0]
    reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
    reshape_shape = [int(i) for i in reshape_shape]
    new_attrs = {'shape': reshape_shape}
    return 'reshape', new_attrs, inputs[:1]

def reshape(attrs, inputs, proto_obj):
    """Reshape the given array by the shape attribute."""
    if len(inputs) == 1:
        return 'reshape', attrs, inputs[0]
    reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
    reshape_shape = [int(i) for i in reshape_shape]
    new_attrs = {'shape': reshape_shape}
    return 'reshape', new_attrs, inputs[:1]
[ "Reshape", "the", "given", "array", "by", "the", "shape", "attribute", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L458-L465
[ "def", "reshape", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "if", "len", "(", "inputs", ")", "==", "1", ":", "return", "'reshape'", ",", "attrs", ",", "inputs", "[", "0", "]", "reshape_shape", "=", "list", "(", "proto_obj", ".", "_para...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
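When a second input is present it is an initializer tensor holding the target shape, which is why the code reads it out of `proto_obj._params` and folds it into a static `shape` attribute, passing only `inputs[:1]` onward. A toy illustration of the same fold in NumPy (the names here are hypothetical):

import numpy as np

shape_tensor = np.array([2, -1])                  # stand-in for the initializer input
new_attrs = {'shape': [int(i) for i in shape_tensor]}
data = np.arange(6).reshape(new_attrs['shape'])   # -> shape (2, 3)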
train
cast
Cast input to a given dtype
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def cast(attrs, inputs, proto_obj):
    """Cast input to a given dtype"""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
    new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
    return 'cast', new_attrs, inputs

def cast(attrs, inputs, proto_obj):
    """Cast input to a given dtype"""
    try:
        from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
    new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
    return 'cast', new_attrs, inputs
[ "Cast", "input", "to", "a", "given", "dtype" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L467-L476
[ "def", "cast", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "try", ":", "from", "onnx", ".", "mapping", "import", "TENSOR_TYPE_TO_NP_TYPE", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. \"", "+", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
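`TENSOR_TYPE_TO_NP_TYPE` maps ONNX's integer dtype enum to NumPy dtypes, which is why the `to` attribute is cast to `int` before the lookup. A small check of the mapping (assuming the `onnx` package is installed):

from onnx import TensorProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

# TensorProto.FLOAT is the integer enum value 1
print(TENSOR_TYPE_TO_NP_TYPE[int(TensorProto.FLOAT)])  # float32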
train
split
Splits an array along a particular axis into multiple sub-arrays.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def split(attrs, inputs, proto_obj):
    """Splits an array along a particular axis into multiple sub-arrays."""
    split_list = attrs.get('split') if 'split' in attrs else []
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'split' : 'num_outputs'})
    if 'axis' not in attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})

    if not split_list:
        num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
    else:
        if len(set(split_list)) == 1:
            num_outputs = len(split_list)
        else:
            raise NotImplementedError("Operator {} in MXNet does not support variable splits. "
                                      "Tracking the issue to support variable split here: "
                                      "https://github.com/apache/incubator-mxnet/issues/11594"
                                      .format('split'))

    new_attrs['num_outputs'] = num_outputs
    return 'split', new_attrs, inputs

def split(attrs, inputs, proto_obj):
    """Splits an array along a particular axis into multiple sub-arrays."""
    split_list = attrs.get('split') if 'split' in attrs else []
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'split' : 'num_outputs'})
    if 'axis' not in attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})

    if not split_list:
        num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
    else:
        if len(set(split_list)) == 1:
            num_outputs = len(split_list)
        else:
            raise NotImplementedError("Operator {} in MXNet does not support variable splits. "
                                      "Tracking the issue to support variable split here: "
                                      "https://github.com/apache/incubator-mxnet/issues/11594"
                                      .format('split'))

    new_attrs['num_outputs'] = num_outputs
    return 'split', new_attrs, inputs
[ "Splits", "an", "array", "along", "a", "particular", "axis", "into", "multiple", "sub", "-", "arrays", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L478-L498
[ "def", "split", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "split_list", "=", "attrs", ".", "get", "(", "'split'", ")", "if", "'split'", "in", "attrs", "else", "[", "]", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(",...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
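The equal-splits check works because a set collapses duplicates: `len(set(split_list)) == 1` holds only when every chunk size is identical, in which case the chunk count is all MXNet's `num_outputs` needs. For instance:

split_list = [5, 5, 5]
assert len(set(split_list)) == 1   # all chunks equal -> representable in MXNet
num_outputs = len(split_list)      # 3

# [5, 3, 2] would instead hit the NotImplementedError branch above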
train
_slice
Returns a slice of the input tensor along multiple axes.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def _slice(attrs, inputs, proto_obj):
    """Returns a slice of the input tensor along multiple axes."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis',
                                                        'ends' : 'end',
                                                        'starts' : 'begin'})
    # ONNX slice provides slicing on multiple axes, so we chain one MXNet
    # slice_axis operator per axis.
    begin = new_attrs.get('begin')
    end = new_attrs.get('end')
    axes = new_attrs.get('axis', tuple(range(len(begin))))
    slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    # Start from the second axis: the first has already been sliced above.
    for i, axis in enumerate(axes[1:], start=1):
        slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
    return slice_op, new_attrs, inputs

def _slice(attrs, inputs, proto_obj):
    """Returns a slice of the input tensor along multiple axes."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis',
                                                        'ends' : 'end',
                                                        'starts' : 'begin'})
    # ONNX slice provides slicing on multiple axes, so we chain one MXNet
    # slice_axis operator per axis.
    begin = new_attrs.get('begin')
    end = new_attrs.get('end')
    axes = new_attrs.get('axis', tuple(range(len(begin))))
    slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    # Start from the second axis: the first has already been sliced above.
    for i, axis in enumerate(axes[1:], start=1):
        slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
    return slice_op, new_attrs, inputs
[ "Returns", "a", "slice", "of", "the", "input", "tensor", "along", "multiple", "axes", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L500-L515
[ "def", "_slice", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", ",", "'ends'", ":", "'end'", ",", "'starts'", ":", "'begin'", "}"...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
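The chained `slice_axis` calls compose a multi-axis slice one dimension at a time, matching what a single NumPy indexing expression would do; for example (illustrative):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
# ONNX Slice with axes=(1, 2), starts=(0, 1), ends=(2, 3) composes as:
step1 = x[:, 0:2, :]        # slice_axis(axis=1, begin=0, end=2)
step2 = step1[:, :, 1:3]    # slice_axis(axis=2, begin=1, end=3)
assert step2.shape == (2, 2, 2)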
train
transpose
Transpose the input array.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def transpose(attrs, inputs, proto_obj):
    """Transpose the input array."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'perm' : 'axes'})
    return 'transpose', new_attrs, inputs

def transpose(attrs, inputs, proto_obj):
    """Transpose the input array."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'perm' : 'axes'})
    return 'transpose', new_attrs, inputs
[ "Transpose", "the", "input", "array", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L517-L521
[ "def", "transpose", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'perm'", ":", "'axes'", "}", ")", "return", "'transpose'", ",", "new_attrs", ",", "input...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
squeeze
Remove single-dimensional entries from the shape of a tensor.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def squeeze(attrs, inputs, proto_obj):
    """Remove single-dimensional entries from the shape of a tensor."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis'})
    return 'squeeze', new_attrs, inputs

def squeeze(attrs, inputs, proto_obj):
    """Remove single-dimensional entries from the shape of a tensor."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis'})
    return 'squeeze', new_attrs, inputs
[ "Remove", "single", "-", "dimensional", "entries", "from", "the", "shape", "of", "a", "tensor", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L523-L527
[ "def", "squeeze", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", "}", ")", "return", "'squeeze'", ",", "new_attrs", ",", "inputs" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
train
unsqueeze
Inserts a new axis of size 1 into the array shape
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def unsqueeze(attrs, inputs, cls):
    """Inserts a new axis of size 1 into the array shape"""
    # MXNet can only add one axis at a time.
    mxnet_op = inputs[0]
    for axis in attrs["axes"]:
        mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
    return mxnet_op, attrs, inputs

def unsqueeze(attrs, inputs, cls):
    """Inserts a new axis of size 1 into the array shape"""
    # MXNet can only add one axis at a time.
    mxnet_op = inputs[0]
    for axis in attrs["axes"]:
        mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
    return mxnet_op, attrs, inputs
[ "Inserts", "a", "new", "axis", "of", "size", "1", "into", "the", "array", "shape" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L529-L536
[ "def", "unsqueeze", "(", "attrs", ",", "inputs", ",", "cls", ")", ":", "# MXNet can only add one axis at a time.", "mxnet_op", "=", "inputs", "[", "0", "]", "for", "axis", "in", "attrs", "[", "\"axes\"", "]", ":", "mxnet_op", "=", "symbol", ".", "expand_dims...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
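Applying `expand_dims` once per axis reproduces ONNX Unsqueeze when `axes` arrives sorted ascending (as exporters typically emit), since each insertion then lands at its position in the final shape; a NumPy sketch of the same loop:

import numpy as np

x = np.zeros((3, 4))
out = x
for axis in (0, 3):                  # axes given in ascending order
    out = np.expand_dims(out, axis=axis)
assert out.shape == (1, 3, 4, 1)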
train
flatten
Flattens the input array into a 2-D array by collapsing the higher dimensions.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def flatten(attrs, inputs, proto_obj):
    """Flattens the input array into a 2-D array by collapsing the higher dimensions."""
    # MXNet does not have axis support. By default uses axis=1.
    if 'axis' in attrs and attrs['axis'] != 1:
        raise RuntimeError("Flatten operator only supports axis=1")
    new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
    return 'Flatten', new_attrs, inputs

def flatten(attrs, inputs, proto_obj):
    """Flattens the input array into a 2-D array by collapsing the higher dimensions."""
    # MXNet does not have axis support. By default uses axis=1.
    if 'axis' in attrs and attrs['axis'] != 1:
        raise RuntimeError("Flatten operator only supports axis=1")
    new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
    return 'Flatten', new_attrs, inputs
[ "Flattens", "the", "input", "array", "into", "a", "2", "-", "D", "array", "by", "collapsing", "the", "higher", "dimensions", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L538-L544
[ "def", "flatten", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "#Mxnet does not have axis support. By default uses axis=1", "if", "'axis'", "in", "attrs", "and", "attrs", "[", "'axis'", "]", "!=", "1", ":", "raise", "RuntimeError", "(", "\"Flatten oper...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
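With axis=1, Flatten keeps the batch dimension and collapses everything after it, i.e. (d0, d1, ..., dn) becomes (d0, d1 * ... * dn); the NumPy analogue:

import numpy as np

x = np.zeros((8, 3, 4, 4))
flat = x.reshape(x.shape[0], -1)   # (8, 48), the axis=1 flattening described above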
train
clip
Clips (limits) the values in an array.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def clip(attrs, inputs, proto_obj):
    """Clips (limits) the values in an array."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
                                                               'max' : 'a_max'})
    if 'a_max' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
    if 'a_min' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
    return 'clip', new_attrs, inputs

def clip(attrs, inputs, proto_obj):
    """Clips (limits) the values in an array."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
                                                               'max' : 'a_max'})
    if 'a_max' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
    if 'a_min' not in new_attrs:
        new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
    return 'clip', new_attrs, inputs
[ "Clips", "(", "limits", ")", "the", "values", "in", "an", "array", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L546-L554
[ "def", "clip", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'min'", ":", "'a_min'", ",", "'max'", ":", "'a_max'", "}", ")", "if", "'a_max'", "not", "...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
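Defaulting a missing bound to ±inf makes a one-sided ONNX Clip a no-op on the unspecified side; the NumPy analogue (illustrative):

import numpy as np

x = np.array([-3.0, 0.5, 7.0])
np.clip(x, a_min=0.0, a_max=np.inf)   # only the lower bound bites -> [0. , 0.5, 7. ]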
train
power
Returns element-wise result of base element raised to powers from exp element.
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def power(attrs, inputs, proto_obj):
    """Returns element-wise result of base element raised to powers from exp element."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent': 'exp'})
    if 'broadcast' in attrs:
        new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
        if attrs['broadcast'] == 1:
            return 'broadcast_power', new_attrs, inputs
        else:
            mxnet_op = symbol.pow(inputs[0], inputs[1])
            return mxnet_op, new_attrs, inputs
    mxnet_op = symbol.broadcast_power(inputs[0], inputs[1])
    return mxnet_op, new_attrs, inputs

def power(attrs, inputs, proto_obj):
    """Returns element-wise result of base element raised to powers from exp element."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent': 'exp'})
    if 'broadcast' in attrs:
        new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
        if attrs['broadcast'] == 1:
            return 'broadcast_power', new_attrs, inputs
        else:
            mxnet_op = symbol.pow(inputs[0], inputs[1])
            return mxnet_op, new_attrs, inputs
    mxnet_op = symbol.broadcast_power(inputs[0], inputs[1])
    return mxnet_op, new_attrs, inputs
[ "Returns", "element", "-", "wise", "result", "of", "base", "element", "raised", "to", "powers", "from", "exp", "element", "." ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L569-L580
[ "def", "power", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'exponent'", ":", "'exp'", "}", ")", "if", "'broadcast'", "in", "attrs", ":", "new_attrs", ...
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
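The legacy ONNX `broadcast` flag selects between the two MXNet variants: `broadcast_power` lets the operand shapes broadcast, while `symbol.pow` expects matching shapes. The elementwise contract in NumPy terms (illustrative):

import numpy as np

base = np.array([[1.0, 2.0], [3.0, 4.0]])
exp = np.array([2.0, 0.5])    # broadcasts across rows
np.power(base, exp)           # [[1., 1.414...], [9., 2.]]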
train
reduce_max
Reduce the array along a given axis by maximum value
python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
def reduce_max(attrs, inputs, proto_obj):
    """Reduce the array along a given axis by maximum value"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'max', new_attrs, inputs

def reduce_max(attrs, inputs, proto_obj):
    """Reduce the array along a given axis by maximum value"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'max', new_attrs, inputs
[ "Reduce", "the", "array", "along", "a", "given", "axis", "by", "maximum", "value" ]
apache/incubator-mxnet
python
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/_op_translations.py#L615-L618
[ "def", "reduce_max", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", "}", ")", "return", "'max'", ",", "new_attrs", ",", "inputs" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7