id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
|---|---|---|---|---|---|---|---|---|---|---|---|
23,500 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule.init_optimizer | def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None,
the full gradients will be accumulated locally without pushing to the KVStore. Otherwise, additional keys will
be pushed to accumulate the full gradients in the KVStore.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`.
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid the pylint warning about dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
# Init dict for storing average of full gradients for each device
self._param_dict = [{key: mx.nd.zeros(shape=value.shape, ctx=self._context[i])
for key, value in self.get_params()[0].items()} for i in range(self._ctx_len)]
svrg_optimizer = self._create_optimizer(_SVRGOptimizer.__name__, default_opt=optimizer,
kvstore=kvstore, optimizer_params=optimizer_params)
super(SVRGModule, self).init_optimizer(kvstore=kvstore, optimizer=svrg_optimizer,
optimizer_params=optimizer_params, force_init=force_init)
# Init additional keys for accumulating full grads in KVStore
if self._kvstore:
for idx, param_on_devs in enumerate(self._exec_group.param_arrays):
name = self._exec_group.param_names[idx]
self._kvstore.init(name + "_full", mx.nd.zeros(shape=self._arg_params[name].shape))
if self._update_on_kvstore:
self._kvstore.pull(name + "_full", param_on_devs, priority=-idx) | python | def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None,
the full gradients will be accumulated locally without pushing to the KVStore. Otherwise, additional keys will
be pushed to accumulate the full gradients in the KVStore.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`.
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid the pylint warning about dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
# Init dict for storing average of full gradients for each device
self._param_dict = [{key: mx.nd.zeros(shape=value.shape, ctx=self._context[i])
for key, value in self.get_params()[0].items()} for i in range(self._ctx_len)]
svrg_optimizer = self._create_optimizer(_SVRGOptimizer.__name__, default_opt=optimizer,
kvstore=kvstore, optimizer_params=optimizer_params)
super(SVRGModule, self).init_optimizer(kvstore=kvstore, optimizer=svrg_optimizer,
optimizer_params=optimizer_params, force_init=force_init)
# Init additional keys for accumulating full grads in KVStore
if self._kvstore:
for idx, param_on_devs in enumerate(self._exec_group.param_arrays):
name = self._exec_group.param_names[idx]
self._kvstore.init(name + "_full", mx.nd.zeros(shape=self._arg_params[name].shape))
if self._update_on_kvstore:
self._kvstore.pull(name + "_full", param_on_devs, priority=-idx) | [
"def",
"init_optimizer",
"(",
"self",
",",
"kvstore",
"=",
"'local'",
",",
"optimizer",
"=",
"'sgd'",
",",
"optimizer_params",
"=",
"(",
"(",
"'learning_rate'",
",",
"0.01",
")",
",",
")",
",",
"force_init",
"=",
"False",
")",
":",
"# Init dict for storing average of full gradients for each device",
"self",
".",
"_param_dict",
"=",
"[",
"{",
"key",
":",
"mx",
".",
"nd",
".",
"zeros",
"(",
"shape",
"=",
"value",
".",
"shape",
",",
"ctx",
"=",
"self",
".",
"_context",
"[",
"i",
"]",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"get_params",
"(",
")",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_ctx_len",
")",
"]",
"svrg_optimizer",
"=",
"self",
".",
"_create_optimizer",
"(",
"_SVRGOptimizer",
".",
"__name__",
",",
"default_opt",
"=",
"optimizer",
",",
"kvstore",
"=",
"kvstore",
",",
"optimizer_params",
"=",
"optimizer_params",
")",
"super",
"(",
"SVRGModule",
",",
"self",
")",
".",
"init_optimizer",
"(",
"kvstore",
"=",
"kvstore",
",",
"optimizer",
"=",
"svrg_optimizer",
",",
"optimizer_params",
"=",
"optimizer_params",
",",
"force_init",
"=",
"force_init",
")",
"# Init additional keys for accumulating full grads in KVStore",
"if",
"self",
".",
"_kvstore",
":",
"for",
"idx",
",",
"param_on_devs",
"in",
"enumerate",
"(",
"self",
".",
"_exec_group",
".",
"param_arrays",
")",
":",
"name",
"=",
"self",
".",
"_exec_group",
".",
"param_names",
"[",
"idx",
"]",
"self",
".",
"_kvstore",
".",
"init",
"(",
"name",
"+",
"\"_full\"",
",",
"mx",
".",
"nd",
".",
"zeros",
"(",
"shape",
"=",
"self",
".",
"_arg_params",
"[",
"name",
"]",
".",
"shape",
")",
")",
"if",
"self",
".",
"_update_on_kvstore",
":",
"self",
".",
"_kvstore",
".",
"pull",
"(",
"name",
"+",
"\"_full\"",
",",
"param_on_devs",
",",
"priority",
"=",
"-",
"idx",
")"
] | Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None,
the full gradients will be accumulated locally without pushing to the KVStore. Otherwise, additional keys will
be pushed to accumulate the full gradients in the KVStore.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`.
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid the pylint warning about dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed. | [
"Installs",
"and",
"initializes",
"SVRGOptimizer",
".",
"The",
"SVRGOptimizer",
"is",
"a",
"wrapper",
"class",
"for",
"a",
"regular",
"optimizer",
"that",
"is",
"passed",
"in",
"and",
"a",
"special",
"AssignmentOptimizer",
"to",
"accumulate",
"the",
"full",
"gradients",
".",
"If",
"KVStore",
"is",
"local",
"or",
"None",
"the",
"full",
"gradients",
"will",
"be",
"accumulated",
"locally",
"without",
"pushing",
"to",
"the",
"KVStore",
".",
"Otherwise",
"additional",
"keys",
"will",
"be",
"pushed",
"to",
"accumulate",
"the",
"full",
"gradients",
"in",
"the",
"KVStore",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L114-L151 |
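A short usage sketch may help here; everything below (the toy network, the shapes, and the `update_freq` value) is an illustrative assumption rather than part of the original record, and it presumes MXNet 1.x with the contrib SVRG module available:

```python
# Hypothetical usage sketch of SVRGModule.init_optimizer (assumes MXNet 1.x)
import mxnet as mx
from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule

data = mx.sym.Variable('data')
out = mx.sym.FullyConnected(data, num_hidden=1, name='fc')
out = mx.sym.LinearRegressionOutput(out, name='lin_reg')

mod = SVRGModule(symbol=out, data_names=['data'], label_names=['lin_reg_label'],
                 update_freq=2)  # take a full-gradient snapshot every 2 epochs
mod.bind(data_shapes=[('data', (32, 4))], label_shapes=[('lin_reg_label', (32, 1))])
mod.init_params()
# Installs the wrapped _SVRGOptimizer described in the docstring above
mod.init_optimizer(kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.01),))
```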
23,501 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule.bind | def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
"""Binds the symbols to construct executors for both two modules. This is necessary before one
can perform computation with the SVRGModule.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
"""
# force rebinding is typically used when one wants to switch from
# training to prediction phase.
super(SVRGModule, self).bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind,
shared_module, grad_req)
if for_training:
self._mod_aux.bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module,
grad_req) | python | def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
"""Binds the symbols to construct executors for both two modules. This is necessary before one
can perform computation with the SVRGModule.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
"""
# force rebinding is typically used when one wants to switch from
# training to prediction phase.
super(SVRGModule, self).bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind,
shared_module, grad_req)
if for_training:
self._mod_aux.bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module,
grad_req) | [
"def",
"bind",
"(",
"self",
",",
"data_shapes",
",",
"label_shapes",
"=",
"None",
",",
"for_training",
"=",
"True",
",",
"inputs_need_grad",
"=",
"False",
",",
"force_rebind",
"=",
"False",
",",
"shared_module",
"=",
"None",
",",
"grad_req",
"=",
"'write'",
")",
":",
"# force rebinding is typically used when one want to switch from",
"# training to prediction phase.",
"super",
"(",
"SVRGModule",
",",
"self",
")",
".",
"bind",
"(",
"data_shapes",
",",
"label_shapes",
",",
"for_training",
",",
"inputs_need_grad",
",",
"force_rebind",
",",
"shared_module",
",",
"grad_req",
")",
"if",
"for_training",
":",
"self",
".",
"_mod_aux",
".",
"bind",
"(",
"data_shapes",
",",
"label_shapes",
",",
"for_training",
",",
"inputs_need_grad",
",",
"force_rebind",
",",
"shared_module",
",",
"grad_req",
")"
] | Binds the symbols to construct executors for both modules. This is necessary before one
can perform computation with the SVRGModule.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths). | [
"Binds",
"the",
"symbols",
"to",
"construct",
"executors",
"for",
"both",
"two",
"modules",
".",
"This",
"is",
"necessary",
"before",
"one",
"can",
"perform",
"computation",
"with",
"the",
"SVRGModule",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L198-L230 |
23,502 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule.forward | def forward(self, data_batch, is_train=None):
"""Forward computation for both two modules. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
"""
super(SVRGModule, self).forward(data_batch, is_train)
if is_train:
self._mod_aux.forward(data_batch, is_train) | python | def forward(self, data_batch, is_train=None):
"""Forward computation for both two modules. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
"""
super(SVRGModule, self).forward(data_batch, is_train)
if is_train:
self._mod_aux.forward(data_batch, is_train) | [
"def",
"forward",
"(",
"self",
",",
"data_batch",
",",
"is_train",
"=",
"None",
")",
":",
"super",
"(",
"SVRGModule",
",",
"self",
")",
".",
"forward",
"(",
"data_batch",
",",
"is_train",
")",
"if",
"is_train",
":",
"self",
".",
"_mod_aux",
".",
"forward",
"(",
"data_batch",
",",
"is_train",
")"
] | Forward computation for both modules. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``. | [
"Forward",
"computation",
"for",
"both",
"two",
"modules",
".",
"It",
"supports",
"data",
"batches",
"with",
"different",
"shapes",
"such",
"as",
"different",
"batch",
"sizes",
"or",
"different",
"image",
"sizes",
".",
"If",
"reshaping",
"of",
"data",
"batch",
"relates",
"to",
"modification",
"of",
"symbol",
"or",
"module",
"such",
"as",
"changing",
"image",
"layout",
"ordering",
"or",
"switching",
"from",
"training",
"to",
"predicting",
"module",
"rebinding",
"is",
"required",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L232-L253 |
23,503 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule.update_full_grads | def update_full_grads(self, train_data):
"""Computes the gradients over all data w.r.t weights of past
m epochs. For distributed env, it will accumulate full grads in the kvstore.
Parameters
----------
train_data: DataIter
Train data iterator
"""
param_names = self._exec_group.param_names
arg, aux = self.get_params()
self._mod_aux.set_params(arg_params=arg, aux_params=aux)
train_data.reset()
nbatch = 0
padding = 0
for batch in train_data:
self._mod_aux.forward(batch, is_train=True)
self._mod_aux.backward()
nbatch += 1
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
grads = self._mod_aux._exec_group.grad_arrays[index][ctx]
self._param_dict[ctx][name] = mx.nd.broadcast_add(self._param_dict[ctx][name], grads, axis=0)
padding = batch.pad
true_num_batch = nbatch - padding / train_data.batch_size
for name in param_names:
grad_list = []
for i in range(self._ctx_len):
self._param_dict[i][name] /= true_num_batch
grad_list.append(self._param_dict[i][name])
if self._kvstore:
# If in distributed mode, push a list of gradients from each worker/device to the KVStore
self._accumulate_kvstore(name, grad_list) | python | def update_full_grads(self, train_data):
"""Computes the gradients over all data w.r.t weights of past
m epochs. For distributed env, it will accumulate full grads in the kvstore.
Parameters
----------
train_data: DataIter
Train data iterator
"""
param_names = self._exec_group.param_names
arg, aux = self.get_params()
self._mod_aux.set_params(arg_params=arg, aux_params=aux)
train_data.reset()
nbatch = 0
padding = 0
for batch in train_data:
self._mod_aux.forward(batch, is_train=True)
self._mod_aux.backward()
nbatch += 1
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
grads = self._mod_aux._exec_group.grad_arrays[index][ctx]
self._param_dict[ctx][name] = mx.nd.broadcast_add(self._param_dict[ctx][name], grads, axis=0)
padding = batch.pad
true_num_batch = nbatch - padding / train_data.batch_size
for name in param_names:
grad_list = []
for i in range(self._ctx_len):
self._param_dict[i][name] /= true_num_batch
grad_list.append(self._param_dict[i][name])
if self._kvstore:
# If in distributed mode, push a list of gradients from each worker/device to the KVStore
self._accumulate_kvstore(name, grad_list) | [
"def",
"update_full_grads",
"(",
"self",
",",
"train_data",
")",
":",
"param_names",
"=",
"self",
".",
"_exec_group",
".",
"param_names",
"arg",
",",
"aux",
"=",
"self",
".",
"get_params",
"(",
")",
"self",
".",
"_mod_aux",
".",
"set_params",
"(",
"arg_params",
"=",
"arg",
",",
"aux_params",
"=",
"aux",
")",
"train_data",
".",
"reset",
"(",
")",
"nbatch",
"=",
"0",
"padding",
"=",
"0",
"for",
"batch",
"in",
"train_data",
":",
"self",
".",
"_mod_aux",
".",
"forward",
"(",
"batch",
",",
"is_train",
"=",
"True",
")",
"self",
".",
"_mod_aux",
".",
"backward",
"(",
")",
"nbatch",
"+=",
"1",
"for",
"ctx",
"in",
"range",
"(",
"self",
".",
"_ctx_len",
")",
":",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"param_names",
")",
":",
"grads",
"=",
"self",
".",
"_mod_aux",
".",
"_exec_group",
".",
"grad_arrays",
"[",
"index",
"]",
"[",
"ctx",
"]",
"self",
".",
"_param_dict",
"[",
"ctx",
"]",
"[",
"name",
"]",
"=",
"mx",
".",
"nd",
".",
"broadcast_add",
"(",
"self",
".",
"_param_dict",
"[",
"ctx",
"]",
"[",
"name",
"]",
",",
"grads",
",",
"axis",
"=",
"0",
")",
"padding",
"=",
"batch",
".",
"pad",
"true_num_batch",
"=",
"nbatch",
"-",
"padding",
"/",
"train_data",
".",
"batch_size",
"for",
"name",
"in",
"param_names",
":",
"grad_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_ctx_len",
")",
":",
"self",
".",
"_param_dict",
"[",
"i",
"]",
"[",
"name",
"]",
"/=",
"true_num_batch",
"grad_list",
".",
"append",
"(",
"self",
".",
"_param_dict",
"[",
"i",
"]",
"[",
"name",
"]",
")",
"if",
"self",
".",
"_kvstore",
":",
"# If in distributed mode, push a list of gradients from each worker/device to the KVStore",
"self",
".",
"_accumulate_kvstore",
"(",
"name",
",",
"grad_list",
")"
] | Computes the gradients over all data w.r.t. the weights of the past
m epochs. For a distributed environment, it accumulates the full grads in the KVStore.
Parameters
----------
train_data: DataIter
Train data iterator | [
"Computes",
"the",
"gradients",
"over",
"all",
"data",
"w",
".",
"r",
".",
"t",
"weights",
"of",
"past",
"m",
"epochs",
".",
"For",
"distributed",
"env",
"it",
"will",
"accumulate",
"full",
"grads",
"in",
"the",
"kvstore",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L292-L325 |
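The `true_num_batch` expression above discounts the padded tail of the final batch; a worked example with illustrative numbers (not taken from the original):

```python
# Illustrative arithmetic for the padding correction in update_full_grads
batch_size = 100
num_samples = 1050     # the iterator pads the final batch up to a full 100
nbatch = 11            # batches seen: ceil(1050 / 100)
padding = 50           # batch.pad reported for the final batch
true_num_batch = nbatch - padding / batch_size
print(true_num_batch)  # 10.5 == 1050 / 100, the effective number of batches
```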
23,504 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule._accumulate_kvstore | def _accumulate_kvstore(self, key, value):
"""Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of
data. The full gradients will be aggregated from each worker in the KVStore.
Parameters
----------
key: int or str
Key in the KVStore.
value: NDArray, RowSparseNDArray
Average of the full gradients.
"""
# Accumulate full gradients for current epochs
self._kvstore.push(key + "_full", value)
self._kvstore._barrier()
self._kvstore.pull(key + "_full", value)
self._allocate_gradients(key, value) | python | def _accumulate_kvstore(self, key, value):
"""Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of
data. The full gradients will be aggregated from each worker in the KVStore.
Parameters
----------
key: int or str
Key in the KVStore.
value: NDArray, RowSparseNDArray
Average of the full gradients.
"""
# Accumulate full gradients for current epochs
self._kvstore.push(key + "_full", value)
self._kvstore._barrier()
self._kvstore.pull(key + "_full", value)
self._allocate_gradients(key, value) | [
"def",
"_accumulate_kvstore",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"# Accumulate full gradients for current epochs",
"self",
".",
"_kvstore",
".",
"push",
"(",
"key",
"+",
"\"_full\"",
",",
"value",
")",
"self",
".",
"_kvstore",
".",
"_barrier",
"(",
")",
"self",
".",
"_kvstore",
".",
"pull",
"(",
"key",
"+",
"\"_full\"",
",",
"value",
")",
"self",
".",
"_allocate_gradients",
"(",
"key",
",",
"value",
")"
] | Accumulate gradients over all data in the KVStore. In a distributed setting, each worker sees a portion of
the data. The full gradients will be aggregated from each worker in the KVStore.
Parameters
----------
key: int or str
Key in the KVStore.
value: NDArray, RowSparseNDArray
Average of the full gradients. | [
"Accumulate",
"gradients",
"over",
"all",
"data",
"in",
"the",
"KVStore",
".",
"In",
"distributed",
"setting",
"each",
"worker",
"sees",
"a",
"portion",
"of",
"data",
".",
"The",
"full",
"gradients",
"will",
"be",
"aggregated",
"from",
"each",
"worker",
"in",
"the",
"KVStore",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L327-L344 |
23,505 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule._allocate_gradients | def _allocate_gradients(self, key, value):
"""Allocate average of full gradients accumulated in the KVStore to each device.
Parameters
----------
key: int or str
Key in the kvstore.
value: List of NDArray, List of RowSparseNDArray
A list of average of the full gradients in the KVStore.
"""
for i in range(self._ctx_len):
self._param_dict[i][key] = value[i] / self._ctx_len | python | def _allocate_gradients(self, key, value):
"""Allocate average of full gradients accumulated in the KVStore to each device.
Parameters
----------
key: int or str
Key in the kvstore.
value: List of NDArray, List of RowSparseNDArray
A list of average of the full gradients in the KVStore.
"""
for i in range(self._ctx_len):
self._param_dict[i][key] = value[i] / self._ctx_len | [
"def",
"_allocate_gradients",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_ctx_len",
")",
":",
"self",
".",
"_param_dict",
"[",
"i",
"]",
"[",
"key",
"]",
"=",
"value",
"[",
"i",
"]",
"/",
"self",
".",
"_ctx_len"
] | Allocate average of full gradients accumulated in the KVStore to each device.
Parameters
----------
key: int or str
Key in the kvstore.
value: List of NDArray, List of RowSparseNDArray
A list of average of the full gradients in the KVStore. | [
"Allocate",
"average",
"of",
"full",
"gradients",
"accumulated",
"in",
"the",
"KVStore",
"to",
"each",
"device",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L346-L358 |
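Taken together, `_accumulate_kvstore` and `_allocate_gradients` form an aggregate-then-average pattern. The sketch below simulates that flow in plain Python with a hypothetical summing store; it is not the real `mxnet.kvstore` API:

```python
# Pure-Python simulation of the push -> barrier -> pull -> divide flow above
num_devices = 2
local_avgs = [4.0, 6.0]               # per-device full-gradient averages for one key
aggregated = sum(local_avgs)          # push: the store sums all contributions
pulled = [aggregated] * num_devices   # pull: every device receives the same sum
per_device = [v / num_devices for v in pulled]
print(per_device)                     # [5.0, 5.0] -- each device ends up with the mean
```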
23,506 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule._update_svrg_gradients | def _update_svrg_gradients(self):
"""Calculates gradients based on the SVRG update rule.
"""
param_names = self._exec_group.param_names
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
g_curr_batch_reg = self._exec_group.grad_arrays[index][ctx]
g_curr_batch_special = self._mod_aux._exec_group.grad_arrays[index][ctx]
g_special_weight_all_batch = self._param_dict[ctx][name]
g_svrg = self._svrg_grads_update_rule(g_curr_batch_reg, g_curr_batch_special,
g_special_weight_all_batch)
self._exec_group.grad_arrays[index][ctx] = g_svrg | python | def _update_svrg_gradients(self):
"""Calculates gradients based on the SVRG update rule.
"""
param_names = self._exec_group.param_names
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
g_curr_batch_reg = self._exec_group.grad_arrays[index][ctx]
g_curr_batch_special = self._mod_aux._exec_group.grad_arrays[index][ctx]
g_special_weight_all_batch = self._param_dict[ctx][name]
g_svrg = self._svrg_grads_update_rule(g_curr_batch_reg, g_curr_batch_special,
g_special_weight_all_batch)
self._exec_group.grad_arrays[index][ctx] = g_svrg | [
"def",
"_update_svrg_gradients",
"(",
"self",
")",
":",
"param_names",
"=",
"self",
".",
"_exec_group",
".",
"param_names",
"for",
"ctx",
"in",
"range",
"(",
"self",
".",
"_ctx_len",
")",
":",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"param_names",
")",
":",
"g_curr_batch_reg",
"=",
"self",
".",
"_exec_group",
".",
"grad_arrays",
"[",
"index",
"]",
"[",
"ctx",
"]",
"g_curr_batch_special",
"=",
"self",
".",
"_mod_aux",
".",
"_exec_group",
".",
"grad_arrays",
"[",
"index",
"]",
"[",
"ctx",
"]",
"g_special_weight_all_batch",
"=",
"self",
".",
"_param_dict",
"[",
"ctx",
"]",
"[",
"name",
"]",
"g_svrg",
"=",
"self",
".",
"_svrg_grads_update_rule",
"(",
"g_curr_batch_reg",
",",
"g_curr_batch_special",
",",
"g_special_weight_all_batch",
")",
"self",
".",
"_exec_group",
".",
"grad_arrays",
"[",
"index",
"]",
"[",
"ctx",
"]",
"=",
"g_svrg"
] | Calculates gradients based on the SVRG update rule. | [
"Calculates",
"gradients",
"based",
"on",
"the",
"SVRG",
"update",
"rule",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L382-L393 |
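`_svrg_grads_update_rule` is not shown in this excerpt; assuming it follows the standard SVRG formulation (Johnson & Zhang, 2013), the variance-reduced gradient combines the three quantities named above as

```latex
g_{\mathrm{svrg}} = \nabla f_i(w) - \nabla f_i(\tilde{w}) + \tilde{\mu}
```

where `g_curr_batch_reg` corresponds to the first term (the gradient at the current weights on the current batch), `g_curr_batch_special` to the second (the gradient at the snapshot weights held by `_mod_aux`, on the same batch), and `g_special_weight_all_batch` to the last (the full-gradient average kept in `_param_dict`).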
23,507 | apache/incubator-mxnet | python/mxnet/contrib/svrg_optimization/svrg_module.py | SVRGModule.prepare | def prepare(self, data_batch, sparse_row_id_fn=None):
"""Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in the KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) | python | def prepare(self, data_batch, sparse_row_id_fn=None):
"""Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in the KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) | [
"def",
"prepare",
"(",
"self",
",",
"data_batch",
",",
"sparse_row_id_fn",
"=",
"None",
")",
":",
"super",
"(",
"SVRGModule",
",",
"self",
")",
".",
"prepare",
"(",
"data_batch",
",",
"sparse_row_id_fn",
"=",
"sparse_row_id_fn",
")",
"self",
".",
"_mod_aux",
".",
"prepare",
"(",
"data_batch",
",",
"sparse_row_id_fn",
"=",
"sparse_row_id_fn",
")"
] | Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in the KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull. | [
"Prepares",
"two",
"modules",
"for",
"processing",
"a",
"data",
"batch",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/svrg_optimization/svrg_module.py#L554-L579 |
23,508 | apache/incubator-mxnet | python/mxnet/registry.py | get_register_func | def get_register_func(base_class, nickname):
"""Get registrator function.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
"""
if base_class not in _REGISTRY:
_REGISTRY[base_class] = {}
registry = _REGISTRY[base_class]
def register(klass, name=None):
"""Register functions"""
assert issubclass(klass, base_class), \
"Can only register subclass of %s"%base_class.__name__
if name is None:
name = klass.__name__
name = name.lower()
if name in registry:
warnings.warn(
"\033[91mNew %s %s.%s registered with name %s is"
"overriding existing %s %s.%s\033[0m"%(
nickname, klass.__module__, klass.__name__, name,
nickname, registry[name].__module__, registry[name].__name__),
UserWarning, stacklevel=2)
registry[name] = klass
return klass
register.__doc__ = "Register %s to the %s factory"%(nickname, nickname)
return register | python | def get_register_func(base_class, nickname):
"""Get registrator function.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
"""
if base_class not in _REGISTRY:
_REGISTRY[base_class] = {}
registry = _REGISTRY[base_class]
def register(klass, name=None):
"""Register functions"""
assert issubclass(klass, base_class), \
"Can only register subclass of %s"%base_class.__name__
if name is None:
name = klass.__name__
name = name.lower()
if name in registry:
warnings.warn(
"\033[91mNew %s %s.%s registered with name %s is"
"overriding existing %s %s.%s\033[0m"%(
nickname, klass.__module__, klass.__name__, name,
nickname, registry[name].__module__, registry[name].__name__),
UserWarning, stacklevel=2)
registry[name] = klass
return klass
register.__doc__ = "Register %s to the %s factory"%(nickname, nickname)
return register | [
"def",
"get_register_func",
"(",
"base_class",
",",
"nickname",
")",
":",
"if",
"base_class",
"not",
"in",
"_REGISTRY",
":",
"_REGISTRY",
"[",
"base_class",
"]",
"=",
"{",
"}",
"registry",
"=",
"_REGISTRY",
"[",
"base_class",
"]",
"def",
"register",
"(",
"klass",
",",
"name",
"=",
"None",
")",
":",
"\"\"\"Register functions\"\"\"",
"assert",
"issubclass",
"(",
"klass",
",",
"base_class",
")",
",",
"\"Can only register subclass of %s\"",
"%",
"base_class",
".",
"__name__",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"klass",
".",
"__name__",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"if",
"name",
"in",
"registry",
":",
"warnings",
".",
"warn",
"(",
"\"\\033[91mNew %s %s.%s registered with name %s is\"",
"\"overriding existing %s %s.%s\\033[0m\"",
"%",
"(",
"nickname",
",",
"klass",
".",
"__module__",
",",
"klass",
".",
"__name__",
",",
"name",
",",
"nickname",
",",
"registry",
"[",
"name",
"]",
".",
"__module__",
",",
"registry",
"[",
"name",
"]",
".",
"__name__",
")",
",",
"UserWarning",
",",
"stacklevel",
"=",
"2",
")",
"registry",
"[",
"name",
"]",
"=",
"klass",
"return",
"klass",
"register",
".",
"__doc__",
"=",
"\"Register %s to the %s factory\"",
"%",
"(",
"nickname",
",",
"nickname",
")",
"return",
"register"
] | Get registrator function.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function | [
"Get",
"registrator",
"function",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/registry.py#L49-L85 |
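A hypothetical usage sketch: the `Optimizer` base class and the names below are invented for illustration, and it assumes `get_register_func`, its module-level `_REGISTRY` dict, and `import warnings` are in scope as in the file above:

```python
class Optimizer(object):
    """Stand-in base class for this sketch."""

register = get_register_func(Optimizer, 'optimizer')

@register                # stored under the lowercased class name, 'sgd'
class SGD(Optimizer):
    pass

register(SGD, 'stochastic_gd')  # or register under an explicit name
```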
23,509 | apache/incubator-mxnet | python/mxnet/registry.py | get_alias_func | def get_alias_func(base_class, nickname):
"""Get registrator function that allow aliases.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
"""
register = get_register_func(base_class, nickname)
def alias(*aliases):
"""alias registrator"""
def reg(klass):
"""registrator function"""
for name in aliases:
register(klass, name)
return klass
return reg
return alias | python | def get_alias_func(base_class, nickname):
"""Get registrator function that allow aliases.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
"""
register = get_register_func(base_class, nickname)
def alias(*aliases):
"""alias registrator"""
def reg(klass):
"""registrator function"""
for name in aliases:
register(klass, name)
return klass
return reg
return alias | [
"def",
"get_alias_func",
"(",
"base_class",
",",
"nickname",
")",
":",
"register",
"=",
"get_register_func",
"(",
"base_class",
",",
"nickname",
")",
"def",
"alias",
"(",
"*",
"aliases",
")",
":",
"\"\"\"alias registrator\"\"\"",
"def",
"reg",
"(",
"klass",
")",
":",
"\"\"\"registrator function\"\"\"",
"for",
"name",
"in",
"aliases",
":",
"register",
"(",
"klass",
",",
"name",
")",
"return",
"klass",
"return",
"reg",
"return",
"alias"
] | Get registrator function that allows aliases.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function | [
"Get",
"registrator",
"function",
"that",
"allow",
"aliases",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/registry.py#L88-L112 |
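A matching sketch for the alias variant, under the same assumptions (and reusing the stand-in `Optimizer` base class) as the previous example:

```python
alias = get_alias_func(Optimizer, 'optimizer')

@alias('adam', 'adam_v1')   # registers the same class under both names
class Adam(Optimizer):
    pass
```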
23,510 | apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | pad_sentences | def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | python | def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | [
"def",
"pad_sentences",
"(",
"sentences",
",",
"padding_word",
"=",
"\"</s>\"",
")",
":",
"sequence_length",
"=",
"max",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"sentences",
")",
"padded_sentences",
"=",
"[",
"]",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"sentences",
")",
":",
"num_padding",
"=",
"sequence_length",
"-",
"len",
"(",
"sentence",
")",
"new_sentence",
"=",
"sentence",
"+",
"[",
"padding_word",
"]",
"*",
"num_padding",
"padded_sentences",
".",
"append",
"(",
"new_sentence",
")",
"return",
"padded_sentences"
] | Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences. | [
"Pads",
"all",
"sentences",
"to",
"the",
"same",
"length",
".",
"The",
"length",
"is",
"defined",
"by",
"the",
"longest",
"sentence",
".",
"Returns",
"padded",
"sentences",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L79-L89 |
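For example (assuming `pad_sentences` above is in scope):

```python
sentences = [['hello', 'world'], ['hi']]
print(pad_sentences(sentences))
# [['hello', 'world'], ['hi', '</s>']] -- every sentence padded to length 2
```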
23,511 | apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | build_input_data | def build_input_data(sentences, labels, vocabulary):
"""Maps sentencs and labels to vectors based on a vocabulary."""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y] | python | def build_input_data(sentences, labels, vocabulary):
"""Maps sentencs and labels to vectors based on a vocabulary."""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y] | [
"def",
"build_input_data",
"(",
"sentences",
",",
"labels",
",",
"vocabulary",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"vocabulary",
"[",
"word",
"]",
"for",
"word",
"in",
"sentence",
"]",
"for",
"sentence",
"in",
"sentences",
"]",
")",
"y",
"=",
"np",
".",
"array",
"(",
"labels",
")",
"return",
"[",
"x",
",",
"y",
"]"
] | Maps sentences and labels to vectors based on a vocabulary. | [
"Maps",
"sentencs",
"and",
"labels",
"to",
"vectors",
"based",
"on",
"a",
"vocabulary",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L105-L109 |
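A small example, assuming `build_input_data` above is in scope; note that every word (including the pad token) must already be in the vocabulary and the sentences must share one length for a proper 2-D array to form:

```python
import numpy as np  # build_input_data relies on numpy imported as np

vocabulary = {'hello': 0, 'world': 1, '</s>': 2}
sentences = [['hello', 'world'], ['hello', '</s>']]   # already padded
labels = [1, 0]
x, y = build_input_data(sentences, labels, vocabulary)
print(x)  # [[0 1] [0 2]]
print(y)  # [1 0]
```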
23,512 | apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | build_input_data_with_word2vec | def build_input_data_with_word2vec(sentences, labels, word2vec_list):
"""
Map sentences and labels to vectors based on a pretrained word2vec
"""
x_vec = []
for sent in sentences:
vec = []
for word in sent:
if word in word2vec_list:
vec.append(word2vec_list[word])
else:
vec.append(word2vec_list['</s>'])
x_vec.append(vec)
x_vec = np.array(x_vec)
y_vec = np.array(labels)
return [x_vec, y_vec] | python | def build_input_data_with_word2vec(sentences, labels, word2vec_list):
"""
Map sentences and labels to vectors based on a pretrained word2vec
"""
x_vec = []
for sent in sentences:
vec = []
for word in sent:
if word in word2vec_list:
vec.append(word2vec_list[word])
else:
vec.append(word2vec_list['</s>'])
x_vec.append(vec)
x_vec = np.array(x_vec)
y_vec = np.array(labels)
return [x_vec, y_vec] | [
"def",
"build_input_data_with_word2vec",
"(",
"sentences",
",",
"labels",
",",
"word2vec_list",
")",
":",
"x_vec",
"=",
"[",
"]",
"for",
"sent",
"in",
"sentences",
":",
"vec",
"=",
"[",
"]",
"for",
"word",
"in",
"sent",
":",
"if",
"word",
"in",
"word2vec_list",
":",
"vec",
".",
"append",
"(",
"word2vec_list",
"[",
"word",
"]",
")",
"else",
":",
"vec",
".",
"append",
"(",
"word2vec_list",
"[",
"'</s>'",
"]",
")",
"x_vec",
".",
"append",
"(",
"vec",
")",
"x_vec",
"=",
"np",
".",
"array",
"(",
"x_vec",
")",
"y_vec",
"=",
"np",
".",
"array",
"(",
"labels",
")",
"return",
"[",
"x_vec",
",",
"y_vec",
"]"
] | Map sentences and labels to vectors based on a pretrained word2vec | [
"Map",
"sentences",
"and",
"labels",
"to",
"vectors",
"based",
"on",
"a",
"pretrained",
"word2vec"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L112-L127 |
23,513 | apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | batch_iter | def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] | python | def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] | [
"def",
"batch_iter",
"(",
"data",
",",
"batch_size",
",",
"num_epochs",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
"data_size",
"=",
"len",
"(",
"data",
")",
"num_batches_per_epoch",
"=",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"batch_size",
")",
"+",
"1",
"for",
"epoch",
"in",
"range",
"(",
"num_epochs",
")",
":",
"# Shuffle the data at each epoch",
"shuffle_indices",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"np",
".",
"arange",
"(",
"data_size",
")",
")",
"shuffled_data",
"=",
"data",
"[",
"shuffle_indices",
"]",
"for",
"batch_num",
"in",
"range",
"(",
"num_batches_per_epoch",
")",
":",
"start_index",
"=",
"batch_num",
"*",
"batch_size",
"end_index",
"=",
"min",
"(",
"(",
"batch_num",
"+",
"1",
")",
"*",
"batch_size",
",",
"data_size",
")",
"yield",
"shuffled_data",
"[",
"start_index",
":",
"end_index",
"]"
] | Generates a batch iterator for a dataset. | [
"Generates",
"a",
"batch",
"iterator",
"for",
"a",
"dataset",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L153-L165 |
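A usage sketch, assuming `batch_iter` above is in scope. Because `num_batches_per_epoch` rounds up unconditionally, the final yielded slice is empty whenever `len(data)` is an exact multiple of `batch_size`:

```python
import numpy as np  # batch_iter relies on numpy imported as np

data = list(zip(range(10), range(10)))
for batch in batch_iter(data, batch_size=4, num_epochs=1):
    print(batch.shape)
# (4, 2), (4, 2), (2, 2) for 10 items; with 8 items the last batch would be (0, 2)
```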
23,514 | apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | load_pretrained_word2vec | def load_pretrained_word2vec(infile):
"""Load the pre-trained word2vec from file."""
if isinstance(infile, str):
infile = open(infile)
word2vec_list = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip().split()
else:
tks = line.strip().split()
word2vec_list[tks[0]] = map(float, tks[1:])
return word2vec_list | python | def load_pretrained_word2vec(infile):
"""Load the pre-trained word2vec from file."""
if isinstance(infile, str):
infile = open(infile)
word2vec_list = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip().split()
else:
tks = line.strip().split()
word2vec_list[tks[0]] = map(float, tks[1:])
return word2vec_list | [
"def",
"load_pretrained_word2vec",
"(",
"infile",
")",
":",
"if",
"isinstance",
"(",
"infile",
",",
"str",
")",
":",
"infile",
"=",
"open",
"(",
"infile",
")",
"word2vec_list",
"=",
"{",
"}",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"infile",
")",
":",
"if",
"idx",
"==",
"0",
":",
"vocab_size",
",",
"dim",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"else",
":",
"tks",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"word2vec_list",
"[",
"tks",
"[",
"0",
"]",
"]",
"=",
"map",
"(",
"float",
",",
"tks",
"[",
"1",
":",
"]",
")",
"return",
"word2vec_list"
] | Load the pre-trained word2vec from file. | [
"Load",
"the",
"pre",
"-",
"trained",
"word2vec",
"from",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L168-L181 |
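The accepted file layout is a header line `<vocab_size> <dim>` followed by one `<word> <v1> ... <v_dim>` line per word; a sketch with an in-memory file, assuming the function above is in scope:

```python
from io import StringIO

pretrained = StringIO('2 3\n</s> 0.0 0.0 0.0\nhello 0.1 -0.2 0.3\n')
vecs = load_pretrained_word2vec(pretrained)
print(list(vecs['hello']))  # [0.1, -0.2, 0.3]
# On Python 3, map() returns a lazy iterator, so materialize each vector once.
```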
23,515 | apache/incubator-mxnet | example/caffe/caffe_net.py | get_mlp | def get_mlp():
"""Get multi-layer perceptron"""
data = mx.symbol.Variable('data')
fc1 = mx.symbol.CaffeOp(data_0=data, num_weight=2, name='fc1',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 128} }")
act1 = mx.symbol.CaffeOp(data_0=fc1, prototxt="layer{type:\"TanH\"}")
fc2 = mx.symbol.CaffeOp(data_0=act1, num_weight=2, name='fc2',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 64} }")
act2 = mx.symbol.CaffeOp(data_0=fc2, prototxt="layer{type:\"TanH\"}")
fc3 = mx.symbol.CaffeOp(data_0=act2, num_weight=2, name='fc3',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 10}}")
if use_caffe_loss:
label = mx.symbol.Variable('softmax_label')
mlp = mx.symbol.CaffeLoss(data=fc3, label=label, grad_scale=1, name='softmax',
prototxt="layer{type:\"SoftmaxWithLoss\"}")
else:
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
return mlp | python | def get_mlp():
"""Get multi-layer perceptron"""
data = mx.symbol.Variable('data')
fc1 = mx.symbol.CaffeOp(data_0=data, num_weight=2, name='fc1',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 128} }")
act1 = mx.symbol.CaffeOp(data_0=fc1, prototxt="layer{type:\"TanH\"}")
fc2 = mx.symbol.CaffeOp(data_0=act1, num_weight=2, name='fc2',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 64} }")
act2 = mx.symbol.CaffeOp(data_0=fc2, prototxt="layer{type:\"TanH\"}")
fc3 = mx.symbol.CaffeOp(data_0=act2, num_weight=2, name='fc3',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 10}}")
if use_caffe_loss:
label = mx.symbol.Variable('softmax_label')
mlp = mx.symbol.CaffeLoss(data=fc3, label=label, grad_scale=1, name='softmax',
prototxt="layer{type:\"SoftmaxWithLoss\"}")
else:
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
return mlp | [
"def",
"get_mlp",
"(",
")",
":",
"data",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"'data'",
")",
"fc1",
"=",
"mx",
".",
"symbol",
".",
"CaffeOp",
"(",
"data_0",
"=",
"data",
",",
"num_weight",
"=",
"2",
",",
"name",
"=",
"'fc1'",
",",
"prototxt",
"=",
"\"layer{type:\\\"InnerProduct\\\" inner_product_param{num_output: 128} }\"",
")",
"act1",
"=",
"mx",
".",
"symbol",
".",
"CaffeOp",
"(",
"data_0",
"=",
"fc1",
",",
"prototxt",
"=",
"\"layer{type:\\\"TanH\\\"}\"",
")",
"fc2",
"=",
"mx",
".",
"symbol",
".",
"CaffeOp",
"(",
"data_0",
"=",
"act1",
",",
"num_weight",
"=",
"2",
",",
"name",
"=",
"'fc2'",
",",
"prototxt",
"=",
"\"layer{type:\\\"InnerProduct\\\" inner_product_param{num_output: 64} }\"",
")",
"act2",
"=",
"mx",
".",
"symbol",
".",
"CaffeOp",
"(",
"data_0",
"=",
"fc2",
",",
"prototxt",
"=",
"\"layer{type:\\\"TanH\\\"}\"",
")",
"fc3",
"=",
"mx",
".",
"symbol",
".",
"CaffeOp",
"(",
"data_0",
"=",
"act2",
",",
"num_weight",
"=",
"2",
",",
"name",
"=",
"'fc3'",
",",
"prototxt",
"=",
"\"layer{type:\\\"InnerProduct\\\" inner_product_param{num_output: 10}}\"",
")",
"if",
"use_caffe_loss",
":",
"label",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"'softmax_label'",
")",
"mlp",
"=",
"mx",
".",
"symbol",
".",
"CaffeLoss",
"(",
"data",
"=",
"fc3",
",",
"label",
"=",
"label",
",",
"grad_scale",
"=",
"1",
",",
"name",
"=",
"'softmax'",
",",
"prototxt",
"=",
"\"layer{type:\\\"SoftmaxWithLoss\\\"}\"",
")",
"else",
":",
"mlp",
"=",
"mx",
".",
"symbol",
".",
"SoftmaxOutput",
"(",
"data",
"=",
"fc3",
",",
"name",
"=",
"'softmax'",
")",
"return",
"mlp"
] | Get multi-layer perceptron | [
"Get",
"multi",
"-",
"layer",
"perceptron"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/caffe/caffe_net.py#L25-L42 |
23,516 | apache/incubator-mxnet | example/sparse/linear_classification/weighted_softmax_ce.py | WeightedSoftmaxCrossEntropyLoss.forward | def forward(self, is_train, req, in_data, out_data, aux):
"""Implements forward computation.
is_train : bool, whether forwarding for training or testing.
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to out_data. 'null' means skip assignment, etc.
in_data : list of NDArray, input data.
out_data : list of NDArray, pre-allocated output buffers.
aux : list of NDArray, mutable auxiliary states. Usually not used.
"""
data = in_data[0]
label = in_data[1]
pred = mx.nd.SoftmaxOutput(data, label)
self.assign(out_data[0], req[0], pred) | python | def forward(self, is_train, req, in_data, out_data, aux):
"""Implements forward computation.
is_train : bool, whether forwarding for training or testing.
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to out_data. 'null' means skip assignment, etc.
in_data : list of NDArray, input data.
out_data : list of NDArray, pre-allocated output buffers.
aux : list of NDArray, mutable auxiliary states. Usually not used.
"""
data = in_data[0]
label = in_data[1]
pred = mx.nd.SoftmaxOutput(data, label)
self.assign(out_data[0], req[0], pred) | [
"def",
"forward",
"(",
"self",
",",
"is_train",
",",
"req",
",",
"in_data",
",",
"out_data",
",",
"aux",
")",
":",
"data",
"=",
"in_data",
"[",
"0",
"]",
"label",
"=",
"in_data",
"[",
"1",
"]",
"pred",
"=",
"mx",
".",
"nd",
".",
"SoftmaxOutput",
"(",
"data",
",",
"label",
")",
"self",
".",
"assign",
"(",
"out_data",
"[",
"0",
"]",
",",
"req",
"[",
"0",
"]",
",",
"pred",
")"
] | Implements forward computation.
is_train : bool, whether forwarding for training or testing.
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to out_data. 'null' means skip assignment, etc.
in_data : list of NDArray, input data.
out_data : list of NDArray, pre-allocated output buffers.
aux : list of NDArray, mutable auxiliary states. Usually not used. | [
"Implements",
"forward",
"computation",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/linear_classification/weighted_softmax_ce.py#L30-L42 |
23,517 | apache/incubator-mxnet | example/sparse/linear_classification/weighted_softmax_ce.py | WeightedSoftmaxCrossEntropyLoss.backward | def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
"""Implements backward computation
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to in_grad
out_grad : list of NDArray, gradient w.r.t. output data.
in_grad : list of NDArray, gradient w.r.t. input data. This is the output buffer.
"""
label = in_data[1]
pred = out_data[0]
dx = pred - mx.nd.one_hot(label, 2)
pos_cls_weight = self.positive_cls_weight
scale_factor = ((1 + label * pos_cls_weight) / pos_cls_weight).reshape((pred.shape[0],1))
rescaled_dx = scale_factor * dx
self.assign(in_grad[0], req[0], rescaled_dx) | python | def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
"""Implements backward computation
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to in_grad
out_grad : list of NDArray, gradient w.r.t. output data.
in_grad : list of NDArray, gradient w.r.t. input data. This is the output buffer.
"""
label = in_data[1]
pred = out_data[0]
dx = pred - mx.nd.one_hot(label, 2)
pos_cls_weight = self.positive_cls_weight
scale_factor = ((1 + label * pos_cls_weight) / pos_cls_weight).reshape((pred.shape[0],1))
rescaled_dx = scale_factor * dx
self.assign(in_grad[0], req[0], rescaled_dx) | [
"def",
"backward",
"(",
"self",
",",
"req",
",",
"out_grad",
",",
"in_data",
",",
"out_data",
",",
"in_grad",
",",
"aux",
")",
":",
"label",
"=",
"in_data",
"[",
"1",
"]",
"pred",
"=",
"out_data",
"[",
"0",
"]",
"dx",
"=",
"pred",
"-",
"mx",
".",
"nd",
".",
"one_hot",
"(",
"label",
",",
"2",
")",
"pos_cls_weight",
"=",
"self",
".",
"positive_cls_weight",
"scale_factor",
"=",
"(",
"(",
"1",
"+",
"label",
"*",
"pos_cls_weight",
")",
"/",
"pos_cls_weight",
")",
".",
"reshape",
"(",
"(",
"pred",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"rescaled_dx",
"=",
"scale_factor",
"*",
"dx",
"self",
".",
"assign",
"(",
"in_grad",
"[",
"0",
"]",
",",
"req",
"[",
"0",
"]",
",",
"rescaled_dx",
")"
] | Implements backward computation
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to in_grad
out_grad : list of NDArray, gradient w.r.t. output data.
in_grad : list of NDArray, gradient w.r.t. input data. This is the output buffer. | [
"Implements",
"backward",
"computation"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/sparse/linear_classification/weighted_softmax_ce.py#L44-L57 |
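A worked example of the rescaling in `backward`, with illustrative numbers and standalone NumPy rather than the custom-op machinery:

```python
import numpy as np

pos_cls_weight = 3.0
label = np.array([0.0, 1.0])                 # one negative, one positive sample
pred = np.array([[0.7, 0.3], [0.4, 0.6]])    # softmax outputs over 2 classes
dx = pred - np.eye(2)[label.astype(int)]     # plain softmax cross-entropy gradient
scale = ((1 + label * pos_cls_weight) / pos_cls_weight).reshape(-1, 1)
print(scale.ravel())  # [0.333..., 1.333...]: positive rows weighted 4x negatives
print(scale * dx)     # the rescaled gradient that backward() writes to in_grad[0]
```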
23,518 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule._reset_bind | def _reset_bind(self):
"""Internal utility function to reset binding."""
self.binded = False
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None | python | def _reset_bind(self):
"""Internal utility function to reset binding."""
self.binded = False
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None | [
"def",
"_reset_bind",
"(",
"self",
")",
":",
"self",
".",
"binded",
"=",
"False",
"self",
".",
"_buckets",
"=",
"{",
"}",
"self",
".",
"_curr_module",
"=",
"None",
"self",
".",
"_curr_bucket_key",
"=",
"None"
] | Internal utility function to reset binding. | [
"Internal",
"utility",
"function",
"to",
"reset",
"binding",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L100-L105 |
23,519 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.data_names | def data_names(self):
"""A list of names for data required by this module."""
if self.binded:
return self._curr_module.data_names
else:
_, data_names, _ = self._call_sym_gen(self._default_bucket_key)
return data_names | python | A list of names for data required by this module. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L112-L118 |
23,520 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.output_names | def output_names(self):
"""A list of names for the outputs of this module."""
if self.binded:
return self._curr_module.output_names
else:
symbol, _, _ = self._call_sym_gen(self._default_bucket_key)
return symbol.list_outputs() | python | A list of names for the outputs of this module. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L121-L127 |
23,521 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.set_states | def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._curr_module.set_states(states, value) | python | Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L275-L287 |
23,522 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.bind | def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
label_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
for_training : bool
Default is ``True``.
inputs_need_grad : bool
Default is ``False``.
force_rebind : bool
Default is ``False``.
shared_module : BucketingModule
Default is ``None``. This value is currently not used.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
bucket_key : str (or any python object)
Bucket key for binding. By default, uses the default_bucket_key.
"""
# in case we already initialized params, keep it
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one wants to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already bound, ignoring bind()')
return
assert shared_module is None, 'shared_module for BucketingModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
self._grad_req = grad_req
symbol, data_names, label_names = self._call_sym_gen(self._default_bucket_key)
module = Module(symbol, data_names, label_names, logger=self.logger,
context=self._context, work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names,
group2ctxs=self._group2ctxs,
compression_params=self._compression_params)
module.bind(data_shapes, label_shapes, for_training, inputs_need_grad,
force_rebind=False, shared_module=None, grad_req=self._grad_req)
self._curr_module = module
self._curr_bucket_key = self._default_bucket_key
self._buckets[self._default_bucket_key] = module
# copy back saved params, if already initialized
if self.params_initialized:
self.set_params(arg_params, aux_params) | python | Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
label_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
for_training : bool
Default is ``True``.
inputs_need_grad : bool
Default is ``False``.
force_rebind : bool
Default is ``False``.
shared_module : BucketingModule
Default is ``None``. This value is currently not used.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
bucket_key : str (or any python object)
Bucket key for binding. By default, uses the default_bucket_key. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L289-L352 |
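A minimal sketch of constructing and binding a `BucketingModule`, assuming variable-length inputs of shape (batch, seq_len); `make_sym` and all shapes are illustrative, not from the source. The mean-pooling keeps parameter shapes identical across buckets, which bucketing requires for weight sharing.

```python
import mxnet as mx

def make_sym(seq_len):
    data = mx.sym.Variable('data')                     # shape (batch, seq_len)
    label = mx.sym.Variable('softmax_label')
    # pool over the variable axis so parameter shapes do not depend on seq_len
    pooled = mx.sym.mean(data, axis=1, keepdims=True)
    fc = mx.sym.FullyConnected(pooled, num_hidden=2)
    out = mx.sym.SoftmaxOutput(fc, label, name='softmax')
    return out, ('data',), ('softmax_label',)

mod = mx.mod.BucketingModule(sym_gen=make_sym, default_bucket_key=20)
mod.bind(data_shapes=[('data', (32, 20))],
         label_shapes=[('softmax_label', (32,))])
mod.init_params()
```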
23,523 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.switch_bucket | def switch_bucket(self, bucket_key, data_shapes, label_shapes=None):
"""Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``.
"""
assert self.binded, 'call bind before switching bucket'
if bucket_key not in self._buckets:
symbol, data_names, label_names = self._call_sym_gen(bucket_key)
module = Module(symbol, data_names, label_names,
logger=self.logger, context=self._context,
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names,
group2ctxs=self._group2ctxs,
compression_params=self._compression_params)
module.bind(data_shapes, label_shapes, self._curr_module.for_training,
self._curr_module.inputs_need_grad,
force_rebind=False, shared_module=self._buckets[self._default_bucket_key],
grad_req=self._grad_req)
if self._monitor is not None:
module.install_monitor(self._monitor)
self._buckets[bucket_key] = module
self._curr_module = self._buckets[bucket_key]
self._curr_bucket_key = bucket_key | python | Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L354-L385 |
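Continuing the `BucketingModule` sketch above: before each batch one switches to the executor for that batch's bucket key, which is created lazily on first use and shares parameters with the default bucket (shapes again illustrative).

```python
mod.switch_bucket(bucket_key=30,
                  data_shapes=[('data', (32, 30))],
                  label_shapes=[('softmax_label', (32,))])
```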
23,524 | apache/incubator-mxnet | python/mxnet/module/bucketing_module.py | BucketingModule.install_monitor | def install_monitor(self, mon):
"""Installs monitor on all executors """
assert self.binded
self._monitor = mon
for mod in self._buckets.values():
mod.install_monitor(mon) | python | Installs monitor on all executors | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L541-L546 |
23,525 | apache/incubator-mxnet | python/mxnet/autograd.py | mark_variables | def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
"""
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variables),
c_handle_array(variables),
c_array_buf(mx_uint, array('I', grad_reqs)),
c_handle_array(gradients))) | python | Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L197-L220 |
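A small sketch of `mark_variables` with a single input; this is essentially what `NDArray.attach_grad()` does internally.

```python
import mxnet as mx

x = mx.nd.array([1.0, 2.0, 3.0])
dx = mx.nd.zeros_like(x)                 # buffer that will receive the gradient
mx.autograd.mark_variables(x, dx, grad_reqs='write')
with mx.autograd.record():
    y = (x * x).sum()
y.backward()
print(dx.asnumpy())                      # gradient of sum(x^2) is 2x: [2. 4. 6.]
```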
23,526 | apache/incubator-mxnet | python/mxnet/autograd.py | _parse_head | def _parse_head(heads, head_grads):
"""parse head gradient for backward and grad."""
if isinstance(heads, NDArray):
heads = [heads]
if isinstance(head_grads, NDArray):
head_grads = [head_grads]
head_handles = c_handle_array(heads)
if head_grads is None:
hgrad_handles = ctypes.c_void_p(0)
else:
assert len(heads) == len(head_grads), \
"heads and head_grads must be lists of the same length"
hgrad_handles = c_array(NDArrayHandle,
[i.handle if i is not None else NDArrayHandle(0)
for i in head_grads])
return head_handles, hgrad_handles | python | parse head gradient for backward and grad. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L223-L240 |
23,527 | apache/incubator-mxnet | python/mxnet/autograd.py | backward | def backward(heads, head_grads=None, retain_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting.
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0))) | python | Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L243-L267 |
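A sketch of the functional form: `mx.autograd.backward(y)` is equivalent to `y.backward()`, and `head_grads` supplies an explicit gradient for the head.

```python
import mxnet as mx

x = mx.nd.array([1.0, 2.0])
x.attach_grad()
with mx.autograd.record():
    y = x * 3
# scale the incoming gradient per element instead of using an implicit all-ones head
mx.autograd.backward(y, head_grads=mx.nd.array([1.0, 10.0]))
print(x.grad.asnumpy())                  # [ 3. 30.]
```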
23,528 | apache/incubator-mxnet | python/mxnet/autograd.py | get_symbol | def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl) | python | Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L347-L362 |
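A sketch of recovering the recorded graph as a `Symbol`; the computation is illustrative.

```python
import mxnet as mx

x = mx.nd.ones((2, 2))
x.attach_grad()
with mx.autograd.record():
    y = mx.nd.relu(x * 2)
sym = mx.autograd.get_symbol(y)          # symbolic view of the recorded history
print(sym.list_arguments())
```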
23,529 | apache/incubator-mxnet | example/recommenders/movielens_data.py | load_mldataset | def load_mldataset(filename):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score) | python | Not particularly fast code to parse the text file and load it into three NDArrays
and produce a Gluon ArrayDataset | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/recommenders/movielens_data.py#L25-L43 |
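A sketch of consuming the returned `ArrayDataset` with a Gluon `DataLoader`; the path assumes the standard MovieLens-100k tab-separated ratings file and is a placeholder.

```python
from mxnet import gluon

dataset = load_mldataset('./ml-100k/u.data')   # path is a placeholder
loader = gluon.data.DataLoader(dataset, batch_size=64, shuffle=True)
for user, item, score in loader:               # each batch is a (user, item, score) triple
    print(user.shape, item.shape, score.shape)
    break
```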
23,530 | apache/incubator-mxnet | tools/caffe_translator/scripts/convert_caffe_model.py | main | def main():
"""Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion"""
parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.')
parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.')
parser.add_argument('output_file_name', help='Name of the output .params file.')
args = parser.parse_args()
converter = CaffeModelConverter()
converter.convert(args.caffemodel, args.output_file_name) | python | Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L108-L118 |
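The CLI form would be `python convert_caffe_model.py model.caffemodel model.params`; a sketch of the equivalent call from Python, where the file names are placeholders and pycaffe must be importable:

```python
converter = CaffeModelConverter()
converter.convert('resnet50.caffemodel', 'resnet50.params')
```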
23,531 | apache/incubator-mxnet | tools/caffe_translator/scripts/convert_caffe_model.py | CaffeModelConverter.add_param | def add_param(self, param_name, layer_index, blob_index):
"""Add a param to the .params file"""
blobs = self.layers[layer_index].blobs
self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index])) | python | Add a param to the .params file | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L33-L36 |
23,532 | apache/incubator-mxnet | tools/caffe_translator/scripts/convert_caffe_model.py | CaffeModelConverter.add_optional_arg_param | def add_optional_arg_param(self, param_name, layer_index, blob_index):
"""Add an arg param. If there is no such param in .caffemodel fie, silently ignore it."""
blobs = self.layers[layer_index].blobs
if blob_index < len(blobs):
self.add_arg_param(param_name, layer_index, blob_index) | python | Add an arg param. If there is no such param in the .caffemodel file, silently ignore it. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L46-L50 |
23,533 | apache/incubator-mxnet | tools/caffe_translator/scripts/convert_caffe_model.py | CaffeModelConverter.convert | def convert(self, caffemodel_path, outmodel_path):
"""Convert a Caffe .caffemodel file to MXNet .params file"""
net_param = caffe_pb2.NetParameter()
with open(caffemodel_path, 'rb') as caffe_model_file:
net_param.ParseFromString(caffe_model_file.read())
layers = net_param.layer
self.layers = layers
for idx, layer in enumerate(layers):
layer_name = str(layer.name)
if layer.blobs:
# If this is a layer that has only weight and bias as parameter
if layer.type == 'Convolution' or layer.type == 'InnerProduct' \
or layer.type == 'Deconvolution':
# Add weight and bias to the dictionary
self.add_arg_param('%s_weight' % layer_name, layer_index=idx, blob_index=0)
self.add_optional_arg_param('%s_bias' % layer_name, layer_index=idx,
blob_index=1)
elif layer.type == 'BatchNorm':
gamma_param_name = '%s_gamma' % layer_name
beta_param_name = '%s_beta' % layer_name
next_layer = layers[idx + 1]
if next_layer.type == 'Scale':
# If next layer is scale layer, get gamma and beta from there
self.add_arg_param(gamma_param_name, layer_index=idx+1, blob_index=0)
self.add_arg_param(beta_param_name, layer_index=idx+1, blob_index=1)
mean_param_name = '%s_moving_mean' % layer_name
var_param_name = '%s_moving_var' % layer_name
self.add_aux_param(mean_param_name, layer_index=idx, blob_index=0)
self.add_aux_param(var_param_name, layer_index=idx, blob_index=1)
elif layer.type == 'Scale':
prev_layer = layers[idx - 1]
if prev_layer.type == 'BatchNorm':
continue
else:
# Use the naming convention used by CaffeOp
self.add_arg_param('%s_0_weight' % layer_name, layer_index=idx,
blob_index=0)
self.add_optional_arg_param('%s_1_bias' % layer_name,
layer_index=idx, blob_index=1)
mx.nd.save(outmodel_path, self.dict_param) | python | Convert a Caffe .caffemodel file to MXNet .params file | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L52-L106 |
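Since the converter writes a plain NDArray dictionary via `mx.nd.save`, the result can be inspected directly; the file name below is a placeholder.

```python
import mxnet as mx

params = mx.nd.load('resnet50.params')
for name in sorted(params):
    # parameter names are derived from the Caffe layer names
    print(name, params[name].shape)
```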
23,534 | apache/incubator-mxnet | python/mxnet/operator.py | CustomOp.assign | def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src | python | def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src | [
"def",
"assign",
"(",
"self",
",",
"dst",
",",
"req",
",",
"src",
")",
":",
"if",
"req",
"==",
"'null'",
":",
"return",
"elif",
"req",
"in",
"(",
"'write'",
",",
"'inplace'",
")",
":",
"dst",
"[",
":",
"]",
"=",
"src",
"elif",
"req",
"==",
"'add'",
":",
"dst",
"[",
":",
"]",
"+=",
"src"
] | Helper function for assigning into dst depending on requirements. | [
"Helper",
"function",
"for",
"assigning",
"into",
"dst",
"depending",
"on",
"requirements",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L463-L470 |
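A sketch of `assign` in use inside a `CustomOp`: honoring `req` lets the engine write, accumulate into, or skip the destination buffer. Registering the op through a `CustomOpProp` (as in the records below) is still needed to run it.

```python
import mxnet as mx

class Identity(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        # copy (or accumulate) the input into the output buffer as req dictates
        self.assign(out_data[0], req[0], in_data[0])

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # identity op: the input gradient is just the output gradient
        self.assign(in_grad[0], req[0], out_grad[0])
```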
23,535 | apache/incubator-mxnet | python/mxnet/operator.py | CustomOpProp.infer_type | def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | python | infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L506-L527 |
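A sketch of overriding `infer_type`, assuming a hypothetical op whose single output is always float32; only the type hook is shown, and a complete operator would also provide `infer_shape` and `create_operator`.

```python
import numpy as np
import mxnet as mx

@mx.operator.register("cast_to_f32_demo")        # hypothetical op name
class CastProp(mx.operator.CustomOpProp):
    def list_arguments(self):
        return ['data']

    def list_outputs(self):
        return ['output']

    def infer_type(self, in_type):
        # inputs keep their dtype; the output is pinned to float32; no aux states
        return in_type, [np.float32], []
```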
23,536 | apache/incubator-mxnet | python/mxnet/operator.py | CustomOpProp.infer_storage_type | def infer_storage_type(self, in_stype):
"""infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
"""
for i, stype in enumerate(in_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type implementation doesnt allow non default stypes: " \
"found non default stype '%s' for in_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input/output stypes" % (stype, i)
return in_stype, \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states()) | python | infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states. | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L529-L558 |
23,537 | apache/incubator-mxnet | python/mxnet/operator.py | CustomOpProp.infer_storage_type_backward | def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
"""infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
"""
for i, stype in enumerate(ograd_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for ograd_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default output gradient stypes" % (stype, i)
for i, stype in enumerate(igrad_stype):
if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]:
stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for igrad_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input gradient stypes" % (stype, i)
stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype]
for stype_list in stype_lists:
stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]
return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4] | python | infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L560-L612 |
23,538 | apache/incubator-mxnet | python/mxnet/operator.py | _Registry.inc | def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur | python | def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur | [
"def",
"inc",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"cur",
"=",
"self",
".",
"counter",
"self",
".",
"counter",
"+=",
"1",
"self",
".",
"lock",
".",
"release",
"(",
")",
"return",
"cur"
] | Get index for new entry. | [
"Get",
"index",
"for",
"new",
"entry",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L682-L688 |
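The lock/acquire/release pattern in `_Registry.inc` is a plain mutex-guarded counter. A minimal self-contained sketch of the same idea (names are illustrative, not MXNet API), using a `with` block, which releases the lock even on exceptions:

```python
import threading

class Counter:
    """Minimal thread-safe counter, mirroring _Registry.inc (illustrative)."""
    def __init__(self):
        self.lock = threading.Lock()
        self.counter = 0

    def inc(self):
        # 'with' releases the lock even if an exception is raised,
        # unlike a bare acquire()/release() pair
        with self.lock:
            cur = self.counter
            self.counter += 1
            return cur

c = Counter()
assert [c.inc() for _ in range(3)] == [0, 1, 2]
```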
23,539 | apache/incubator-mxnet | tools/rec2idx.py | IndexCreator.close | def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close() | python | def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_open",
":",
"return",
"super",
"(",
"IndexCreator",
",",
"self",
")",
".",
"close",
"(",
")",
"self",
".",
"fidx",
".",
"close",
"(",
")"
] | Closes the record and index files. | [
"Closes",
"the",
"record",
"and",
"index",
"files",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L58-L63 |
23,540 | apache/incubator-mxnet | tools/rec2idx.py | IndexCreator.tell | def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value | python | def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value | [
"def",
"tell",
"(",
"self",
")",
":",
"pos",
"=",
"ctypes",
".",
"c_size_t",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderTell",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"pos",
")",
")",
")",
"return",
"pos",
".",
"value"
] | Returns the current position of the read head. | [
"Returns",
"the",
"current",
"position",
"of",
"read",
"head",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L65-L70 |
23,541 | apache/incubator-mxnet | tools/rec2idx.py | IndexCreator.create_index | def create_index(self):
"""Creates the index file from open record file
"""
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n'%(str(key), pos))
counter = counter + 1 | python | def create_index(self):
"""Creates the index file from open record file
"""
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n'%(str(key), pos))
counter = counter + 1 | [
"def",
"create_index",
"(",
"self",
")",
":",
"self",
".",
"reset",
"(",
")",
"counter",
"=",
"0",
"pre_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"if",
"counter",
"%",
"1000",
"==",
"0",
":",
"cur_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'time:'",
",",
"cur_time",
"-",
"pre_time",
",",
"' count:'",
",",
"counter",
")",
"pos",
"=",
"self",
".",
"tell",
"(",
")",
"cont",
"=",
"self",
".",
"read",
"(",
")",
"if",
"cont",
"is",
"None",
":",
"break",
"key",
"=",
"self",
".",
"key_type",
"(",
"counter",
")",
"self",
".",
"fidx",
".",
"write",
"(",
"'%s\\t%d\\n'",
"%",
"(",
"str",
"(",
"key",
")",
",",
"pos",
")",
")",
"counter",
"=",
"counter",
"+",
"1"
] | Creates the index file from open record file | [
"Creates",
"the",
"index",
"file",
"from",
"open",
"record",
"file"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L72-L88 |
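`create_index` writes one `key\tposition` line per record, which is the index layout `mx.recordio.MXIndexedRecordIO` reads back for random access. A hedged usage sketch (`data.idx`/`data.rec` are placeholder file names):

```python
import mxnet as mx

# Random access into a .rec file via the .idx produced by IndexCreator
record = mx.recordio.MXIndexedRecordIO('data.idx', 'data.rec', 'r')
item = record.read_idx(0)                   # seek straight to the record with key 0
header, payload = mx.recordio.unpack(item)  # split the IRHeader from the blob
record.close()
```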
23,542 | apache/incubator-mxnet | docs/mxdoc.py | _run_cmd | def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err | python | def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err | [
"def",
"_run_cmd",
"(",
"cmds",
")",
":",
"if",
"not",
"isinstance",
"(",
"cmds",
",",
"str",
")",
":",
"cmds",
"=",
"\"\"",
".",
"join",
"(",
"cmds",
")",
"print",
"(",
"\"Execute \\\"%s\\\"\"",
"%",
"cmds",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"cmds",
",",
"shell",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"print",
"(",
"err",
")",
"raise",
"err"
] | Run commands, raise exception if failed | [
"Run",
"commands",
"raise",
"exception",
"if",
"failed"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L73-L82 |
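One subtlety worth calling out: when `_run_cmd` receives a list, it concatenates the elements with `"".join`, i.e. without any separator, so callers must embed their own whitespace or shell operators. Assuming the function above is in scope:

```python
# The two list elements are concatenated verbatim -- note the leading ' && '
_run_cmd(['echo first', ' && echo second'])

# Equivalent single-string form
_run_cmd('echo first && echo second')
```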
23,543 | apache/incubator-mxnet | docs/mxdoc.py | generate_doxygen | def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) | python | def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) | [
"def",
"generate_doxygen",
"(",
"app",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make doxygen\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"\"cp -rf doxygen/html %s/doxygen\"",
"%",
"app",
".",
"builder",
".",
"outdir",
")"
] | Run the doxygen make commands | [
"Run",
"the",
"doxygen",
"make",
"commands"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L84-L87 |
23,544 | apache/incubator-mxnet | docs/mxdoc.py | build_mxnet | def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir) | python | def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir) | [
"def",
"build_mxnet",
"(",
"app",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"srcdir",
",",
"'..'",
",",
"'config.mk'",
")",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 \"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"else",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 \"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")"
] | Build mxnet .so lib | [
"Build",
"mxnet",
".",
"so",
"lib"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L89-L96 |
23,545 | apache/incubator-mxnet | docs/mxdoc.py | build_r_docs | def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path) | python | def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path) | [
"def",
"build_r_docs",
"(",
"app",
")",
":",
"r_root",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../R-package'",
"pdf_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/api/r/mxnet-r-reference-manual.pdf'",
"_run_cmd",
"(",
"'cd '",
"+",
"r_root",
"+",
"'; R -e \"roxygen2::roxygenize()\"; R CMD Rd2pdf . --no-preview -o '",
"+",
"pdf_path",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/r/'",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
"+",
"'; mv '",
"+",
"pdf_path",
"+",
"' '",
"+",
"dest_path",
")"
] | build r pdf | [
"build",
"r",
"pdf"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L98-L105 |
23,546 | apache/incubator-mxnet | docs/mxdoc.py | build_scala | def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use"""
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir) | python | def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use"""
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir) | [
"def",
"build_scala",
"(",
"app",
")",
":",
"if",
"any",
"(",
"v",
"in",
"_BUILD_VER",
"for",
"v",
"in",
"[",
"'1.2.'",
",",
"'1.3.'",
",",
"'1.4.'",
"]",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make scalapkg\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"\"cd %s/.. && make scalainstall\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"else",
":",
"_run_cmd",
"(",
"\"cd %s/../scala-package && mvn -B install -DskipTests\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")"
] | build scala for scala docs, java docs, and clojure docs to use | [
"build",
"scala",
"for",
"scala",
"docs",
"java",
"docs",
"and",
"clojure",
"docs",
"to",
"use"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L107-L113 |
23,547 | apache/incubator-mxnet | docs/mxdoc.py | build_scala_docs | def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') | python | def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') | [
"def",
"build_scala_docs",
"(",
"app",
")",
":",
"scala_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../scala-package'",
"scala_doc_sources",
"=",
"'find . -type f -name \"*.scala\" | egrep \\\"\\.\\/core|\\.\\/infer\\\" | egrep -v \\\"\\/javaapi\\\" | egrep -v \\\"Suite\\\"'",
"scala_doc_classpath",
"=",
"':'",
".",
"join",
"(",
"[",
"'`find native -name \"*.jar\" | grep \"target/lib/\" | tr \"\\\\n\" \":\" `'",
",",
"'`find macros -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find core -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find infer -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
"]",
")",
"# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions",
"scala_ignore_errors",
"=",
"'; exit 0'",
"if",
"any",
"(",
"v",
"in",
"_BUILD_VER",
"for",
"v",
"in",
"[",
"'1.2.'",
",",
"'1.3.'",
"]",
")",
"else",
"''",
"_run_cmd",
"(",
"'cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'",
".",
"format",
"(",
"scala_path",
",",
"scala_doc_sources",
",",
"scala_doc_classpath",
",",
"scala_ignore_errors",
")",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/scala/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x",
"scaladocs",
"=",
"[",
"'index'",
",",
"'index.html'",
",",
"'org'",
",",
"'lib'",
",",
"'index.js'",
",",
"'package.html'",
"]",
"for",
"doc_file",
"in",
"scaladocs",
":",
"_run_cmd",
"(",
"'cd '",
"+",
"scala_path",
"+",
"' && mv -f '",
"+",
"doc_file",
"+",
"' '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] | build scala doc and then move the outdir | [
"build",
"scala",
"doc",
"and",
"then",
"move",
"the",
"outdir"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L115-L135 |
23,548 | apache/incubator-mxnet | docs/mxdoc.py | build_java_docs | def build_java_docs(app):
"""build java docs and then move the outdir"""
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') | python | def build_java_docs(app):
"""build java docs and then move the outdir"""
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') | [
"def",
"build_java_docs",
"(",
"app",
")",
":",
"java_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../scala-package'",
"java_doc_sources",
"=",
"'find . -type f -name \"*.scala\" | egrep \\\"\\.\\/core|\\.\\/infer\\\" | egrep \\\"\\/javaapi\\\" | egrep -v \\\"Suite\\\"'",
"java_doc_classpath",
"=",
"':'",
".",
"join",
"(",
"[",
"'`find native -name \"*.jar\" | grep \"target/lib/\" | tr \"\\\\n\" \":\" `'",
",",
"'`find macros -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find core -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find infer -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
"]",
")",
"_run_cmd",
"(",
"'cd {}; scaladoc `{}` -classpath {} -feature -deprecation'",
".",
"format",
"(",
"java_path",
",",
"java_doc_sources",
",",
"java_doc_classpath",
")",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/java/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"javadocs",
"=",
"[",
"'index'",
",",
"'index.html'",
",",
"'org'",
",",
"'lib'",
",",
"'index.js'",
",",
"'package.html'",
"]",
"for",
"doc_file",
"in",
"javadocs",
":",
"_run_cmd",
"(",
"'cd '",
"+",
"java_path",
"+",
"' && mv -f '",
"+",
"doc_file",
"+",
"' '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] | build java docs and then move the outdir | [
"build",
"java",
"docs",
"and",
"then",
"move",
"the",
"outdir"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L137-L154 |
23,549 | apache/incubator-mxnet | docs/mxdoc.py | build_clojure_docs | def build_clojure_docs(app):
"""build clojure doc and then move the outdir"""
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0') | python | def build_clojure_docs(app):
"""build clojure doc and then move the outdir"""
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0') | [
"def",
"build_clojure_docs",
"(",
"app",
")",
":",
"clojure_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../contrib/clojure-package'",
"_run_cmd",
"(",
"'cd '",
"+",
"clojure_path",
"+",
"'; lein codox'",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/clojure/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"clojure_doc_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../contrib/clojure-package/target/doc'",
"_run_cmd",
"(",
"'cd '",
"+",
"clojure_doc_path",
"+",
"' && cp -r * '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] | build clojure doc and then move the outdir | [
"build",
"clojure",
"doc",
"and",
"then",
"move",
"the",
"outdir"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L156-L164 |
23,550 | apache/incubator-mxnet | docs/mxdoc.py | _convert_md_table_to_rst | def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out | python | def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out | [
"def",
"_convert_md_table_to_rst",
"(",
"table",
")",
":",
"if",
"len",
"(",
"table",
")",
"<",
"3",
":",
"return",
"''",
"out",
"=",
"'```eval_rst\\n.. list-table::\\n :header-rows: 1\\n\\n'",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"table",
")",
":",
"cols",
"=",
"l",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"i",
"==",
"0",
":",
"ncol",
"=",
"len",
"(",
"cols",
")",
"else",
":",
"if",
"len",
"(",
"cols",
")",
"!=",
"ncol",
":",
"return",
"''",
"if",
"i",
"==",
"1",
":",
"for",
"c",
"in",
"cols",
":",
"if",
"len",
"(",
"c",
")",
"is",
"not",
"0",
"and",
"'---'",
"not",
"in",
"c",
":",
"return",
"''",
"else",
":",
"for",
"j",
",",
"c",
"in",
"enumerate",
"(",
"cols",
")",
":",
"out",
"+=",
"' * - '",
"if",
"j",
"==",
"0",
"else",
"' - '",
"out",
"+=",
"pypandoc",
".",
"convert_text",
"(",
"c",
",",
"'rst'",
",",
"format",
"=",
"'md'",
")",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
".",
"replace",
"(",
"'\\r'",
",",
"''",
")",
"+",
"'\\n'",
"out",
"+=",
"'```\\n'",
"return",
"out"
] | Convert a markdown table to rst format | [
"Convert",
"a",
"markdown",
"table",
"to",
"rst",
"format"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L166-L188 |
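A quick usage sketch, assuming the function above is in scope and that `pypandoc` (plus a pandoc binary) is installed; the input is a list of raw `|`-delimited lines and the return value is either an `eval_rst` block or the empty string for malformed tables:

```python
table = [
    '| Name | Value |',
    '| --- | --- |',
    '| alpha | 1 |',
]
rst = _convert_md_table_to_rst(table)
# rst holds an eval_rst block with a ".. list-table::" directive:
# the header row ("Name", "Value") plus one body row ("alpha", "1").
# A ragged row or a bad separator row yields '' instead.
print(rst)
```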
23,551 | apache/incubator-mxnet | docs/mxdoc.py | convert_table | def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname)) | python | def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname)) | [
"def",
"convert_table",
"(",
"app",
",",
"docname",
",",
"source",
")",
":",
"num_tables",
"=",
"0",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"source",
")",
":",
"table",
"=",
"[",
"]",
"output",
"=",
"''",
"in_table",
"=",
"False",
"for",
"l",
"in",
"j",
".",
"split",
"(",
"'\\n'",
")",
":",
"r",
"=",
"l",
".",
"strip",
"(",
")",
"if",
"r",
".",
"startswith",
"(",
"'|'",
")",
":",
"table",
".",
"append",
"(",
"r",
")",
"in_table",
"=",
"True",
"else",
":",
"if",
"in_table",
"is",
"True",
":",
"converted",
"=",
"_convert_md_table_to_rst",
"(",
"table",
")",
"if",
"converted",
"is",
"''",
":",
"print",
"(",
"\"Failed to convert the markdown table\"",
")",
"print",
"(",
"table",
")",
"else",
":",
"num_tables",
"+=",
"1",
"output",
"+=",
"converted",
"in_table",
"=",
"False",
"table",
"=",
"[",
"]",
"output",
"+=",
"l",
"+",
"'\\n'",
"source",
"[",
"i",
"]",
"=",
"output",
"if",
"num_tables",
">",
"0",
":",
"print",
"(",
"'Converted %d tables in %s'",
"%",
"(",
"num_tables",
",",
"docname",
")",
")"
] | Find tables in a markdown and then convert them into the rst format | [
"Find",
"tables",
"in",
"a",
"markdown",
"and",
"then",
"convert",
"them",
"into",
"the",
"rst",
"format"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L191-L217 |
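`convert_table` is shaped for Sphinx's `source-read` event, so `source` is a one-element list mutated in place; since `app` is never used in the body, the function can be exercised directly. A sketch under the same `pypandoc` assumption:

```python
doc = """Some intro text.
| a | b |
| --- | --- |
| 1 | 2 |
Some outro text.
"""
source = [doc]
convert_table(None, 'demo_page', source)  # the app argument is unused
print(source[0])  # the markdown table is now an eval_rst list-table block
```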
23,552 | apache/incubator-mxnet | docs/mxdoc.py | _parse_code_lines | def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
    - lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent) | python | def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
    - lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent) | [
"def",
"_parse_code_lines",
"(",
"lines",
")",
":",
"in_code",
"=",
"False",
"lang",
"=",
"None",
"indent",
"=",
"None",
"for",
"l",
"in",
"lines",
":",
"m",
"=",
"_CODE_MARK",
".",
"match",
"(",
"l",
")",
"if",
"m",
"is",
"not",
"None",
":",
"if",
"not",
"in_code",
":",
"if",
"m",
".",
"groups",
"(",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"in",
"_LANGS",
":",
"lang",
"=",
"m",
".",
"groups",
"(",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"indent",
"=",
"len",
"(",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"in_code",
"=",
"True",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")",
"else",
":",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")",
"lang",
"=",
"None",
"indent",
"=",
"None",
"in_code",
"=",
"False",
"else",
":",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")"
] | An iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block language
- indent: the code indent | [
"A",
"iterator",
"that",
"returns",
"if",
"a",
"line",
"is",
"within",
"a",
"code",
"block"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L219-L248 |
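`_parse_code_lines` leans on module-level `_CODE_MARK` and `_LANGS` defined elsewhere in mxdoc.py; the sketch below substitutes plausible stand-ins (a fenced-code regex and a language whitelist) purely for illustration:

```python
import re

# Assumed stand-ins -- the real definitions live elsewhere in mxdoc.py
_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
_LANGS = {'python', 'scala', 'r'}

lines = ['prose', '```python', 'print(1)', '```', 'more prose']
for line, in_code, lang, indent in _parse_code_lines(lines):
    print(in_code, lang, repr(line))
# Both fence lines and 'print(1)' report in_code=True with lang='python';
# the surrounding prose reports in_code=False with lang=None.
```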
23,553 | apache/incubator-mxnet | docs/mxdoc.py | _get_blocks | def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block) | python | def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block) | [
"def",
"_get_blocks",
"(",
"lines",
")",
":",
"cur_block",
"=",
"[",
"]",
"pre_lang",
"=",
"None",
"pre_in_code",
"=",
"None",
"for",
"(",
"l",
",",
"in_code",
",",
"cur_lang",
",",
"_",
")",
"in",
"_parse_code_lines",
"(",
"lines",
")",
":",
"if",
"in_code",
"!=",
"pre_in_code",
":",
"if",
"pre_in_code",
"and",
"len",
"(",
"cur_block",
")",
">=",
"2",
":",
"cur_block",
"=",
"cur_block",
"[",
"1",
":",
"-",
"1",
"]",
"# remove ```",
"# remove empty lines at head",
"while",
"len",
"(",
"cur_block",
")",
">",
"0",
":",
"if",
"len",
"(",
"cur_block",
"[",
"0",
"]",
")",
"==",
"0",
":",
"cur_block",
".",
"pop",
"(",
"0",
")",
"else",
":",
"break",
"# remove empty lines at tail",
"while",
"len",
"(",
"cur_block",
")",
">",
"0",
":",
"if",
"len",
"(",
"cur_block",
"[",
"-",
"1",
"]",
")",
"==",
"0",
":",
"cur_block",
".",
"pop",
"(",
")",
"else",
":",
"break",
"if",
"len",
"(",
"cur_block",
")",
":",
"yield",
"(",
"pre_in_code",
",",
"pre_lang",
",",
"cur_block",
")",
"cur_block",
"=",
"[",
"]",
"cur_block",
".",
"append",
"(",
"l",
")",
"pre_lang",
"=",
"cur_lang",
"pre_in_code",
"=",
"in_code",
"if",
"len",
"(",
"cur_block",
")",
":",
"yield",
"(",
"pre_in_code",
",",
"pre_lang",
",",
"cur_block",
")"
] | split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source | [
"split",
"lines",
"into",
"code",
"and",
"non",
"-",
"code",
"blocks"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L260-L296 |
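With the same assumed `_CODE_MARK`/`_LANGS` stand-ins as above, `_get_blocks` groups the per-line stream into alternating prose and code blocks, stripping the fences and any blank padding from code blocks:

```python
import re

_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')  # assumed stand-in
_LANGS = {'python'}                            # assumed stand-in

lines = ['Prose.', '```python', '', 'x = 1', 'print(x)', '', '```', 'More prose.']
for in_code, lang, block in _get_blocks(lines):
    print(in_code, lang, block)
# Expected:
#   False None ['Prose.']
#   True  python ['x = 1', 'print(x)']   <- fences and blank padding removed
#   False None ['More prose.']
```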
23,554 | apache/incubator-mxnet | docs/mxdoc.py | _get_python_block_output | def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err) | python | def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err) | [
"def",
"_get_python_block_output",
"(",
"src",
",",
"global_dict",
",",
"local_dict",
")",
":",
"src",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"src",
".",
"split",
"(",
"'\\n'",
")",
"if",
"not",
"l",
".",
"startswith",
"(",
"'%'",
")",
"and",
"not",
"'plt.show()'",
"in",
"l",
"]",
")",
"ret_status",
"=",
"True",
"err",
"=",
"''",
"with",
"_string_io",
"(",
")",
"as",
"s",
":",
"try",
":",
"exec",
"(",
"src",
",",
"global_dict",
",",
"global_dict",
")",
"except",
"Exception",
"as",
"e",
":",
"err",
"=",
"str",
"(",
"e",
")",
"ret_status",
"=",
"False",
"return",
"(",
"ret_status",
",",
"s",
".",
"getvalue",
"(",
")",
"+",
"err",
")"
] | Evaluate python source codes
Returns
(bool, str):
- True if success
- output | [
"Evaluate",
"python",
"source",
"codes"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L321-L339 |
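This helper depends on a `_string_io` context manager defined elsewhere in mxdoc.py; the sketch assumes a minimal stand-in built on `contextlib.redirect_stdout`. Note also that `local_dict` is accepted but unused: the source runs with `global_dict` as both namespaces.

```python
import contextlib
import io

@contextlib.contextmanager
def _string_io():
    # Assumed stand-in: capture stdout into a StringIO buffer
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        yield buf

env = {}
print(_get_python_block_output('x = 2\nprint(x * 3)', env, None))  # (True, '6\n')
print(_get_python_block_output('1 / 0', env, None))  # (False, 'division by zero')
```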
23,555 | apache/incubator-mxnet | docs/mxdoc.py | copy_artifacts | def copy_artifacts(app):
"""Copies artifacts needed for website presentation"""
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path) | python | def copy_artifacts(app):
"""Copies artifacts needed for website presentation"""
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path) | [
"def",
"copy_artifacts",
"(",
"app",
")",
":",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/error'",
"source_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/build_version_doc/artifacts'",
"_run_cmd",
"(",
"'cd '",
"+",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"source_path",
"+",
"'/404.html '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"source_path",
"+",
"'/api.html '",
"+",
"dest_path",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/_static'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/_static/mxnet.css '",
"+",
"dest_path",
")"
] | Copies artifacts needed for website presentation | [
"Copies",
"artifacts",
"needed",
"for",
"website",
"presentation"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L443-L455 |
23,556 | apache/incubator-mxnet | tools/caffe_converter/convert_caffe_modelzoo.py | download_caffe_model | def download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
proto_url, proto_sha1 = meta_info['prototxt']
prototxt = mx.gluon.utils.download(proto_url,
model_name+'_deploy.prototxt',
sha1_hash=proto_sha1)
assert 'caffemodel' in meta_info, "mssing caffemodel url"
caffemodel_url, caffemodel_sha1 = meta_info['caffemodel']
caffemodel = mx.gluon.utils.download(caffemodel_url,
model_name+'.caffemodel',
sha1_hash=caffemodel_sha1)
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean[0], str):
mean_url, mean_sha1 = mean
mean = mx.gluon.utils.download(mean_url,
model_name+'_mean.binaryproto',
sha1_hash=mean_sha1)
return (prototxt, caffemodel, mean) | python | def download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
proto_url, proto_sha1 = meta_info['prototxt']
prototxt = mx.gluon.utils.download(proto_url,
model_name+'_deploy.prototxt',
sha1_hash=proto_sha1)
assert 'caffemodel' in meta_info, "mssing caffemodel url"
caffemodel_url, caffemodel_sha1 = meta_info['caffemodel']
caffemodel = mx.gluon.utils.download(caffemodel_url,
model_name+'.caffemodel',
sha1_hash=caffemodel_sha1)
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean[0], str):
mean_url, mean_sha1 = mean
mean = mx.gluon.utils.download(mean_url,
model_name+'_mean.binaryproto',
sha1_hash=mean_sha1)
return (prototxt, caffemodel, mean) | [
"def",
"download_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
"=",
"'./model'",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dst_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"dst_dir",
")",
"model_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"model_name",
")",
"assert",
"'prototxt'",
"in",
"meta_info",
",",
"\"missing prototxt url\"",
"proto_url",
",",
"proto_sha1",
"=",
"meta_info",
"[",
"'prototxt'",
"]",
"prototxt",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"proto_url",
",",
"model_name",
"+",
"'_deploy.prototxt'",
",",
"sha1_hash",
"=",
"proto_sha1",
")",
"assert",
"'caffemodel'",
"in",
"meta_info",
",",
"\"mssing caffemodel url\"",
"caffemodel_url",
",",
"caffemodel_sha1",
"=",
"meta_info",
"[",
"'caffemodel'",
"]",
"caffemodel",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"caffemodel_url",
",",
"model_name",
"+",
"'.caffemodel'",
",",
"sha1_hash",
"=",
"caffemodel_sha1",
")",
"assert",
"'mean'",
"in",
"meta_info",
",",
"'no mean info'",
"mean",
"=",
"meta_info",
"[",
"'mean'",
"]",
"if",
"isinstance",
"(",
"mean",
"[",
"0",
"]",
",",
"str",
")",
":",
"mean_url",
",",
"mean_sha1",
"=",
"mean",
"mean",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"mean_url",
",",
"model_name",
"+",
"'_mean.binaryproto'",
",",
"sha1_hash",
"=",
"mean_sha1",
")",
"return",
"(",
"prototxt",
",",
"caffemodel",
",",
"mean",
")"
] | Download caffe model into disk by the given meta info | [
"Download",
"caffe",
"model",
"into",
"disk",
"by",
"the",
"given",
"meta",
"info"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/convert_caffe_modelzoo.py#L118-L142 |
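The expected `meta_info` layout follows from the assertions above: `(url, sha1)` pairs for `prototxt` and `caffemodel`, and either a `(url, sha1)` pair or a list of per-channel values for `mean`. A hedged sketch with placeholder URLs and hashes (not real artifacts):

```python
meta_info = {
    'prototxt':   ('https://example.com/vgg16_deploy.prototxt', '0' * 40),
    'caffemodel': ('https://example.com/vgg16.caffemodel', '0' * 40),
    # 'mean' may instead be a (url, sha1) pair pointing at a .binaryproto file
    'mean':       [123.68, 116.779, 103.939],
}
prototxt, caffemodel, mean = download_caffe_model('vgg16', meta_info,
                                                  dst_dir='./model')
```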
23,557 | apache/incubator-mxnet | tools/caffe_converter/convert_caffe_modelzoo.py | convert_caffe_model | def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download, convert and save a caffe model"""
(prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
model_name = os.path.join(dst_dir, model_name)
convert_model(prototxt, caffemodel, model_name)
if isinstance(mean, str):
mx_mean = model_name + '-mean.nd'
convert_mean(mean, mx_mean)
mean = mx_mean
return (model_name, mean) | python | def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download, convert and save a caffe model"""
(prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
model_name = os.path.join(dst_dir, model_name)
convert_model(prototxt, caffemodel, model_name)
if isinstance(mean, str):
mx_mean = model_name + '-mean.nd'
convert_mean(mean, mx_mean)
mean = mx_mean
return (model_name, mean) | [
"def",
"convert_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
"=",
"'./model'",
")",
":",
"(",
"prototxt",
",",
"caffemodel",
",",
"mean",
")",
"=",
"download_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
")",
"model_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"model_name",
")",
"convert_model",
"(",
"prototxt",
",",
"caffemodel",
",",
"model_name",
")",
"if",
"isinstance",
"(",
"mean",
",",
"str",
")",
":",
"mx_mean",
"=",
"model_name",
"+",
"'-mean.nd'",
"convert_mean",
"(",
"mean",
",",
"mx_mean",
")",
"mean",
"=",
"mx_mean",
"return",
"(",
"model_name",
",",
"mean",
")"
] | Download, convert and save a caffe model | [
"Download",
"convert",
"and",
"save",
"a",
"caffe",
"model"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/convert_caffe_modelzoo.py#L144-L154 |
23,558 | apache/incubator-mxnet | example/gluon/lipnet/utils/multi.py | multi_p_run | def multi_p_run(tot_num, _func, worker, params, n_process):
"""
Run _func with multi-process using params.
"""
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result | python | def multi_p_run(tot_num, _func, worker, params, n_process):
"""
    Run _func with multiple processes using params.
"""
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result | [
"def",
"multi_p_run",
"(",
"tot_num",
",",
"_func",
",",
"worker",
",",
"params",
",",
"n_process",
")",
":",
"from",
"multiprocessing",
"import",
"Process",
",",
"Queue",
"out_q",
"=",
"Queue",
"(",
")",
"procs",
"=",
"[",
"]",
"split_num",
"=",
"split_seq",
"(",
"list",
"(",
"range",
"(",
"0",
",",
"tot_num",
")",
")",
",",
"n_process",
")",
"print",
"(",
"tot_num",
",",
"\">>\"",
",",
"split_num",
")",
"split_len",
"=",
"len",
"(",
"split_num",
")",
"if",
"n_process",
">",
"split_len",
":",
"n_process",
"=",
"split_len",
"for",
"i",
"in",
"range",
"(",
"n_process",
")",
":",
"_p",
"=",
"Process",
"(",
"target",
"=",
"_func",
",",
"args",
"=",
"(",
"worker",
",",
"split_num",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"split_num",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"params",
",",
"out_q",
")",
")",
"_p",
".",
"daemon",
"=",
"True",
"procs",
".",
"append",
"(",
"_p",
")",
"_p",
".",
"start",
"(",
")",
"try",
":",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_process",
")",
":",
"result",
".",
"append",
"(",
"out_q",
".",
"get",
"(",
")",
")",
"for",
"i",
"in",
"procs",
":",
"i",
".",
"join",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'Killing all the children in the pool.'",
")",
"for",
"i",
"in",
"procs",
":",
"i",
".",
"terminate",
"(",
")",
"i",
".",
"join",
"(",
")",
"return",
"-",
"1",
"while",
"not",
"out_q",
".",
"empty",
"(",
")",
":",
"print",
"(",
"out_q",
".",
"get",
"(",
"block",
"=",
"False",
")",
")",
"return",
"result"
] | Run _func with multiple processes using params. | [
"Run",
"_func",
"with",
"multi",
"-",
"process",
"using",
"params",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/multi.py#L23-L63 |
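The `_func` passed to `multi_p_run` receives `(worker, start, end, params, out_q)` and is responsible for putting exactly one result per process on the queue; `split_seq` comes from the same module. A hedged sketch of a compatible pair (the worker signature is a convention chosen here, not enforced by `multi_p_run`):

```python
def square(i, params):
    # Per-item worker; 'params' carries shared read-only arguments
    return i * i + params['offset']

def proc_range(worker, start, end, params, out_q):
    # One process handles the half-open index range [start, end)
    out_q.put([worker(i, params) for i in range(start, end)])

# Assuming multi_p_run (and split_seq) from this module are importable,
# and guarding the entry point for spawn-based platforms:
# if __name__ == '__main__':
#     results = multi_p_run(10, proc_range, square, {'offset': 1}, n_process=2)
#     # 'results' holds one sub-list per process
```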
23,559 | apache/incubator-mxnet | example/ssd/config/utils.py | namedtuple_with_defaults | def namedtuple_with_defaults(typename, field_names, default_values=()):
""" create a namedtuple with default values """
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None, ) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T | python | def namedtuple_with_defaults(typename, field_names, default_values=()):
""" create a namedtuple with default values """
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None, ) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T | [
"def",
"namedtuple_with_defaults",
"(",
"typename",
",",
"field_names",
",",
"default_values",
"=",
"(",
")",
")",
":",
"T",
"=",
"collections",
".",
"namedtuple",
"(",
"typename",
",",
"field_names",
")",
"T",
".",
"__new__",
".",
"__defaults__",
"=",
"(",
"None",
",",
")",
"*",
"len",
"(",
"T",
".",
"_fields",
")",
"if",
"isinstance",
"(",
"default_values",
",",
"collections",
".",
"Mapping",
")",
":",
"prototype",
"=",
"T",
"(",
"*",
"*",
"default_values",
")",
"else",
":",
"prototype",
"=",
"T",
"(",
"*",
"default_values",
")",
"T",
".",
"__new__",
".",
"__defaults__",
"=",
"tuple",
"(",
"prototype",
")",
"return",
"T"
] | create a namedtuple with default values | [
"create",
"a",
"namedtuple",
"with",
"default",
"values"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L61-L70 |
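A quick usage sketch, assuming the function above is in scope; fields without an explicit default fall back to `None`:

```python
Box = namedtuple_with_defaults('Box', ['w', 'h', 'color'], {'color': 'red'})
print(Box(3, 4))          # Box(w=3, h=4, color='red')
print(Box())              # Box(w=None, h=None, color='red')
print(Box(1, 2, 'blue'))  # explicit values still override the defaults
```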
23,560 | apache/incubator-mxnet | example/ssd/config/utils.py | merge_dict | def merge_dict(a, b):
""" merge dict a, b, with b overriding keys in a """
c = a.copy()
c.update(b)
return c | python | def merge_dict(a, b):
""" merge dict a, b, with b overriding keys in a """
c = a.copy()
c.update(b)
return c | [
"def",
"merge_dict",
"(",
"a",
",",
"b",
")",
":",
"c",
"=",
"a",
".",
"copy",
"(",
")",
"c",
".",
"update",
"(",
"b",
")",
"return",
"c"
] | merge dict a, b, with b overriding keys in a | [
"merge",
"dict",
"a",
"b",
"with",
"b",
"overriding",
"keys",
"in",
"a"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L72-L76 |
23,561 | apache/incubator-mxnet | example/ssd/config/utils.py | zip_namedtuple | def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret | python | def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret | [
"def",
"zip_namedtuple",
"(",
"nt_list",
")",
":",
"if",
"not",
"nt_list",
":",
"return",
"dict",
"(",
")",
"if",
"not",
"isinstance",
"(",
"nt_list",
",",
"list",
")",
":",
"nt_list",
"=",
"[",
"nt_list",
"]",
"for",
"nt",
"in",
"nt_list",
":",
"assert",
"type",
"(",
"nt",
")",
"==",
"type",
"(",
"nt_list",
"[",
"0",
"]",
")",
"ret",
"=",
"{",
"k",
":",
"[",
"v",
"]",
"for",
"k",
",",
"v",
"in",
"nt_list",
"[",
"0",
"]",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
"}",
"for",
"nt",
"in",
"nt_list",
"[",
"1",
":",
"]",
":",
"for",
"k",
",",
"v",
"in",
"nt",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
":",
"ret",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"ret"
] | accept list of namedtuple, return a dict of zipped fields | [
"accept",
"list",
"of",
"namedtuple",
"return",
"a",
"dict",
"of",
"zipped",
"fields"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L78-L90 |
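Assuming the function above is in scope, `zip_namedtuple` turns a list of identically-typed namedtuples into one dict of per-field lists (a single namedtuple is wrapped automatically):

```python
import collections

Pt = collections.namedtuple('Pt', ['x', 'y'])
print(zip_namedtuple([Pt(1, 2), Pt(3, 4)]))  # {'x': [1, 3], 'y': [2, 4]}
print(zip_namedtuple(Pt(5, 6)))              # {'x': [5], 'y': [6]}
print(zip_namedtuple([]))                    # {}
```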
23,562 | apache/incubator-mxnet | example/ssd/config/utils.py | config_as_dict | def config_as_dict(cfg):
""" convert raw configuration to unified dictionary """
ret = cfg.__dict__.copy()
# random cropping params
del ret['rand_crop_samplers']
assert isinstance(cfg.rand_crop_samplers, list)
ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
num_crop_sampler = len(cfg.rand_crop_samplers)
ret['num_crop_sampler'] = num_crop_sampler # must specify the #
ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
# random padding params
del ret['rand_pad']
ret = merge_dict(ret, cfg.rand_pad._asdict())
# color jitter
del ret['color_jitter']
ret = merge_dict(ret, cfg.color_jitter._asdict())
return ret | python | def config_as_dict(cfg):
""" convert raw configuration to unified dictionary """
ret = cfg.__dict__.copy()
# random cropping params
del ret['rand_crop_samplers']
assert isinstance(cfg.rand_crop_samplers, list)
ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
num_crop_sampler = len(cfg.rand_crop_samplers)
ret['num_crop_sampler'] = num_crop_sampler # must specify the #
ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
# random padding params
del ret['rand_pad']
ret = merge_dict(ret, cfg.rand_pad._asdict())
# color jitter
del ret['color_jitter']
ret = merge_dict(ret, cfg.color_jitter._asdict())
return ret | [
"def",
"config_as_dict",
"(",
"cfg",
")",
":",
"ret",
"=",
"cfg",
".",
"__dict__",
".",
"copy",
"(",
")",
"# random cropping params",
"del",
"ret",
"[",
"'rand_crop_samplers'",
"]",
"assert",
"isinstance",
"(",
"cfg",
".",
"rand_crop_samplers",
",",
"list",
")",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"zip_namedtuple",
"(",
"cfg",
".",
"rand_crop_samplers",
")",
")",
"num_crop_sampler",
"=",
"len",
"(",
"cfg",
".",
"rand_crop_samplers",
")",
"ret",
"[",
"'num_crop_sampler'",
"]",
"=",
"num_crop_sampler",
"# must specify the #",
"ret",
"[",
"'rand_crop_prob'",
"]",
"=",
"1.0",
"/",
"(",
"num_crop_sampler",
"+",
"1",
")",
"*",
"num_crop_sampler",
"# random padding params",
"del",
"ret",
"[",
"'rand_pad'",
"]",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"cfg",
".",
"rand_pad",
".",
"_asdict",
"(",
")",
")",
"# color jitter",
"del",
"ret",
"[",
"'color_jitter'",
"]",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"cfg",
".",
"color_jitter",
".",
"_asdict",
"(",
")",
")",
"return",
"ret"
] | convert raw configuration to unified dictionary | [
"convert",
"raw",
"configuration",
"to",
"unified",
"dictionary"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L92-L108 |
23,563 | apache/incubator-mxnet | python/mxnet/contrib/onnx/onnx2mx/import_model.py | get_model_metadata | def get_model_metadata(model_file):
"""
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
        'input_tensor_data' : list of tuples representing the shape of the input parameters
'output_tensor_data' : list of tuples representing the shape of the output of the model
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata | python | def get_model_metadata(model_file):
"""
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
        'input_tensor_data' : list of tuples representing the shape of the input parameters
'output_tensor_data' : list of tuples representing the shape of the output of the model
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata | [
"def",
"get_model_metadata",
"(",
"model_file",
")",
":",
"graph",
"=",
"GraphProto",
"(",
")",
"try",
":",
"import",
"onnx",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"model_proto",
"=",
"onnx",
".",
"load_model",
"(",
"model_file",
")",
"metadata",
"=",
"graph",
".",
"get_graph_metadata",
"(",
"model_proto",
".",
"graph",
")",
"return",
"metadata"
] | Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input parameters
'output_tensor_data' : list of tuples representing the shape of the output of the model | [
"Returns",
"the",
"name",
"and",
"shape",
"information",
"of",
"input",
"and",
"output",
"tensors",
"of",
"the",
"given",
"ONNX",
"model",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_model.py#L62-L93 |
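A usage sketch via the public `mxnet.contrib.onnx` namespace; `model.onnx` is a placeholder path, and the sketch assumes each entry in the metadata lists is a `(name, shape)` pair as described above:

```python
import mxnet.contrib.onnx as onnx_mxnet

metadata = onnx_mxnet.get_model_metadata('model.onnx')  # placeholder path
for name, shape in metadata['input_tensor_data']:
    print('input :', name, shape)
for name, shape in metadata['output_tensor_data']:
    print('output:', name, shape)
```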
23,564 | apache/incubator-mxnet | example/ssd/symbol/common.py | multi_layer_feature | def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
"""Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
    feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
"""
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
from_layer, num_filter, s, p = params
if from_layer.strip():
# extract from base network
layer = internals[from_layer.strip() + '_output']
layers.append(layer)
else:
# attach from last feature layer
assert len(layers) > 0
assert num_filter > 0
layer = layers[-1]
num_1x1 = max(min_filter, num_filter // 2)
conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
layers.append(conv_3x3)
return layers | python | def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
"""Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scaling are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
"""
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
from_layer, num_filter, s, p = params
if from_layer.strip():
# extract from base network
layer = internals[from_layer.strip() + '_output']
layers.append(layer)
else:
# attach from last feature layer
assert len(layers) > 0
assert num_filter > 0
layer = layers[-1]
num_1x1 = max(min_filter, num_filter // 2)
conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
layers.append(conv_3x3)
return layers | [
"def",
"multi_layer_feature",
"(",
"body",
",",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
",",
"min_filter",
"=",
"128",
")",
":",
"# arguments check",
"assert",
"len",
"(",
"from_layers",
")",
">",
"0",
"assert",
"isinstance",
"(",
"from_layers",
"[",
"0",
"]",
",",
"str",
")",
"and",
"len",
"(",
"from_layers",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
">",
"0",
"assert",
"len",
"(",
"from_layers",
")",
"==",
"len",
"(",
"num_filters",
")",
"==",
"len",
"(",
"strides",
")",
"==",
"len",
"(",
"pads",
")",
"internals",
"=",
"body",
".",
"get_internals",
"(",
")",
"layers",
"=",
"[",
"]",
"for",
"k",
",",
"params",
"in",
"enumerate",
"(",
"zip",
"(",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
")",
")",
":",
"from_layer",
",",
"num_filter",
",",
"s",
",",
"p",
"=",
"params",
"if",
"from_layer",
".",
"strip",
"(",
")",
":",
"# extract from base network",
"layer",
"=",
"internals",
"[",
"from_layer",
".",
"strip",
"(",
")",
"+",
"'_output'",
"]",
"layers",
".",
"append",
"(",
"layer",
")",
"else",
":",
"# attach from last feature layer",
"assert",
"len",
"(",
"layers",
")",
">",
"0",
"assert",
"num_filter",
">",
"0",
"layer",
"=",
"layers",
"[",
"-",
"1",
"]",
"num_1x1",
"=",
"max",
"(",
"min_filter",
",",
"num_filter",
"//",
"2",
")",
"conv_1x1",
"=",
"conv_act_layer",
"(",
"layer",
",",
"'multi_feat_%d_conv_1x1'",
"%",
"(",
"k",
")",
",",
"num_1x1",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"0",
",",
"0",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"act_type",
"=",
"'relu'",
")",
"conv_3x3",
"=",
"conv_act_layer",
"(",
"conv_1x1",
",",
"'multi_feat_%d_conv_3x3'",
"%",
"(",
"k",
")",
",",
"num_filter",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"pad",
"=",
"(",
"p",
",",
"p",
")",
",",
"stride",
"=",
"(",
"s",
",",
"s",
")",
",",
"act_type",
"=",
"'relu'",
")",
"layers",
".",
"append",
"(",
"conv_3x3",
")",
"return",
"layers"
] | Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' to add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scaling are applied, the number of filters for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols | [
"Wrapper",
"function",
"to",
"extract",
"features",
"from",
"base",
"network",
"attaching",
"extra",
"layers",
"and",
"SSD",
"specific",
"layers"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L96-L151 |
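A hedged usage sketch for multi_layer_feature: the base symbol, its internal layer names ('relu4_3', 'relu7'), and the load_base_symbol helper are all assumptions standing in for a real pretrained network; conv_act_layer from the same module must be in scope.

body = load_base_symbol()  # hypothetical: a VGG-style mx.sym.Symbol
layers = multi_layer_feature(
    body,
    from_layers=['relu4_3', 'relu7', '', '', '', ''],  # assumed layer names
    num_filters=[512, -1, 512, 256, 256, 256],
    strides=[-1, -1, 2, 2, 1, 1],
    pads=[-1, -1, 1, 1, 1, 1])
print(len(layers))  # 6 feature maps, one per detection scale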
23,565 | apache/incubator-mxnet | python/mxnet/gluon/loss.py | _apply_weighting | def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss | python | def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss | [
"def",
"_apply_weighting",
"(",
"F",
",",
"loss",
",",
"weight",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"sample_weight",
"is",
"not",
"None",
":",
"loss",
"=",
"F",
".",
"broadcast_mul",
"(",
"loss",
",",
"sample_weight",
")",
"if",
"weight",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"numeric_types",
")",
",",
"\"weight must be a number\"",
"loss",
"=",
"loss",
"*",
"weight",
"return",
"loss"
] | Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss | [
"Apply",
"weighting",
"to",
"loss",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/loss.py#L34-L62 |
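The weighting semantics of _apply_weighting can be checked with plain ndarrays; this is a sketch of the equivalent computation, not part of the module.

import mxnet as mx

loss = mx.nd.ones((64, 10))                           # per-element loss
sample_weight = mx.nd.full((64, 1), 0.5)              # one weight per sample
out = mx.nd.broadcast_mul(loss, sample_weight) * 2.0  # per-sample, then global weight
print(out.shape, out[0, 0].asscalar())                # (64, 10) 1.0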
23,566 | apache/incubator-mxnet | python/mxnet/gluon/loss.py | _reshape_like | def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y) | python | def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y) | [
"def",
"_reshape_like",
"(",
"F",
",",
"x",
",",
"y",
")",
":",
"return",
"x",
".",
"reshape",
"(",
"y",
".",
"shape",
")",
"if",
"F",
"is",
"ndarray",
"else",
"F",
".",
"reshape_like",
"(",
"x",
",",
"y",
")"
] | Reshapes x to the same shape as y. | [
"Reshapes",
"x",
"to",
"the",
"same",
"shape",
"as",
"y",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/loss.py#L65-L67 |
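A quick illustration of the dual dispatch, calling the private helper directly: with ndarray inputs it reshapes eagerly, while symbol inputs would go through F.reshape_like.

import mxnet as mx

x = mx.nd.arange(6)
y = mx.nd.zeros((2, 3))
print(_reshape_like(mx.nd, x, y).shape)  # (2, 3)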
23,567 | apache/incubator-mxnet | example/neural-style/nstyle.py | get_tv_grad_executor | def get_tv_grad_executor(img, ctx, tv_weight):
"""create TV gradient executor with input binded on img
"""
if tv_weight <= 0.0:
return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
mx.sym.Convolution(data=channels[i], weight=skernel,
num_filter=1,
kernel=(3, 3), pad=(1,1),
no_bias=True, stride=(1,1))
for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
.reshape((1, 1, 3, 3)),
ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img,
"kernel": kernel}) | python | def get_tv_grad_executor(img, ctx, tv_weight):
"""create TV gradient executor with input binded on img
"""
if tv_weight <= 0.0:
return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
mx.sym.Convolution(data=channels[i], weight=skernel,
num_filter=1,
kernel=(3, 3), pad=(1,1),
no_bias=True, stride=(1,1))
for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
.reshape((1, 1, 3, 3)),
ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img,
"kernel": kernel}) | [
"def",
"get_tv_grad_executor",
"(",
"img",
",",
"ctx",
",",
"tv_weight",
")",
":",
"if",
"tv_weight",
"<=",
"0.0",
":",
"return",
"None",
"nchannel",
"=",
"img",
".",
"shape",
"[",
"1",
"]",
"simg",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"\"img\"",
")",
"skernel",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"\"kernel\"",
")",
"channels",
"=",
"mx",
".",
"sym",
".",
"SliceChannel",
"(",
"simg",
",",
"num_outputs",
"=",
"nchannel",
")",
"out",
"=",
"mx",
".",
"sym",
".",
"Concat",
"(",
"*",
"[",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
"=",
"channels",
"[",
"i",
"]",
",",
"weight",
"=",
"skernel",
",",
"num_filter",
"=",
"1",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"pad",
"=",
"(",
"1",
",",
"1",
")",
",",
"no_bias",
"=",
"True",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nchannel",
")",
"]",
")",
"kernel",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"1",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"4",
",",
"-",
"1",
"]",
",",
"[",
"0",
",",
"-",
"1",
",",
"0",
"]",
"]",
")",
".",
"reshape",
"(",
"(",
"1",
",",
"1",
",",
"3",
",",
"3",
")",
")",
",",
"ctx",
")",
"/",
"8.0",
"out",
"=",
"out",
"*",
"tv_weight",
"return",
"out",
".",
"bind",
"(",
"ctx",
",",
"args",
"=",
"{",
"\"img\"",
":",
"img",
",",
"\"kernel\"",
":",
"kernel",
"}",
")"
] | create TV gradient executor with input bound to img | [
"create",
"TV",
"gradient",
"executor",
"with",
"input",
"binded",
"on",
"img"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/neural-style/nstyle.py#L143-L165 |
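A usage sketch: the bound executor applies a Laplacian-style kernel per channel and scales by tv_weight, approximating the gradient of a smoothness penalty on the image.

import mxnet as mx

ctx = mx.cpu()
img = mx.nd.random.uniform(shape=(1, 3, 64, 64), ctx=ctx)
tv_exec = get_tv_grad_executor(img, ctx, tv_weight=1e-2)
if tv_exec is not None:                # None when tv_weight <= 0
    tv_grad = tv_exec.forward()[0]     # same shape as img
    print(tv_grad.shape)               # (1, 3, 64, 64)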
23,568 | apache/incubator-mxnet | example/deep-embedded-clustering/data.py | get_mnist | def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
mnist_data = mx.test_utils.get_mnist()
X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
p = np.random.permutation(X.shape[0])
X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
Y = Y[p]
return X, Y | python | def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
mnist_data = mx.test_utils.get_mnist()
X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
p = np.random.permutation(X.shape[0])
X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
Y = Y[p]
return X, Y | [
"def",
"get_mnist",
"(",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"1234",
")",
"# set seed for deterministic ordering",
"mnist_data",
"=",
"mx",
".",
"test_utils",
".",
"get_mnist",
"(",
")",
"X",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_data'",
"]",
",",
"mnist_data",
"[",
"'test_data'",
"]",
"]",
")",
"Y",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_label'",
"]",
",",
"mnist_data",
"[",
"'test_label'",
"]",
"]",
")",
"p",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"X",
"=",
"X",
"[",
"p",
"]",
".",
"reshape",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"*",
"5",
"Y",
"=",
"Y",
"[",
"p",
"]",
"return",
"X",
",",
"Y"
] | Gets MNIST dataset | [
"Gets",
"MNIST",
"dataset"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/deep-embedded-clustering/data.py#L25-L35 |
23,569 | apache/incubator-mxnet | python/mxnet/executor_manager.py | _split_input_slice | def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices | python | def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices | [
"def",
"_split_input_slice",
"(",
"batch_size",
",",
"work_load_list",
")",
":",
"total_work_load",
"=",
"sum",
"(",
"work_load_list",
")",
"batch_num_list",
"=",
"[",
"round",
"(",
"work_load",
"*",
"batch_size",
"/",
"total_work_load",
")",
"for",
"work_load",
"in",
"work_load_list",
"]",
"batch_num_sum",
"=",
"sum",
"(",
"batch_num_list",
")",
"if",
"batch_num_sum",
"<",
"batch_size",
":",
"batch_num_list",
"[",
"-",
"1",
"]",
"+=",
"batch_size",
"-",
"batch_num_sum",
"slices",
"=",
"[",
"]",
"end",
"=",
"0",
"for",
"batch_num",
"in",
"batch_num_list",
":",
"begin",
"=",
"int",
"(",
"min",
"(",
"(",
"end",
",",
"batch_size",
")",
")",
")",
"end",
"=",
"int",
"(",
"min",
"(",
"(",
"begin",
"+",
"batch_num",
",",
"batch_size",
")",
")",
")",
"if",
"begin",
">=",
"end",
":",
"raise",
"ValueError",
"(",
"'Too many slices. Some splits are empty.'",
")",
"slices",
".",
"append",
"(",
"slice",
"(",
"begin",
",",
"end",
")",
")",
"return",
"slices"
] | Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices. | [
"Get",
"input",
"slice",
"from",
"the",
"input",
"shape",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L31-L66 |
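A worked example of the slicing arithmetic, calling the private helper directly with values chosen so the rounding is exact:

slices = _split_input_slice(batch_size=8, work_load_list=[1, 1, 2])
print(slices)  # [slice(0, 2, None), slice(2, 4, None), slice(4, 8, None)]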
23,570 | apache/incubator-mxnet | python/mxnet/executor_manager.py | _check_arguments | def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name) | python | def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name) | [
"def",
"_check_arguments",
"(",
"symbol",
")",
":",
"arg_set",
"=",
"set",
"(",
")",
"arg_names",
"=",
"symbol",
".",
"list_arguments",
"(",
")",
"for",
"name",
"in",
"arg_names",
":",
"if",
"name",
"in",
"arg_set",
":",
"raise",
"ValueError",
"(",
"(",
"'Find duplicated argument name \\\"%s\\\", '",
"+",
"'please make the weight name non-duplicated(using name arguments), '",
"+",
"'arguments are %s'",
")",
"%",
"(",
"name",
",",
"str",
"(",
"arg_names",
")",
")",
")",
"arg_set",
".",
"add",
"(",
"name",
")",
"aux_set",
"=",
"set",
"(",
")",
"aux_names",
"=",
"symbol",
".",
"list_auxiliary_states",
"(",
")",
"for",
"name",
"in",
"aux_names",
":",
"if",
"name",
"in",
"aux_set",
":",
"raise",
"ValueError",
"(",
"(",
"'Find duplicated auxiliary param name \\\"%s\\\", '",
"+",
"'please make the weight name non-duplicated(using name arguments), '",
"+",
"'arguments are %s, auxiliary params are %s'",
")",
"%",
"(",
"name",
",",
"str",
"(",
"arg_names",
")",
",",
"str",
"(",
"aux_names",
")",
")",
")",
"aux_set",
".",
"add",
"(",
"name",
")"
] | Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration. | [
"Check",
"the",
"argument",
"names",
"of",
"symbol",
".",
"This",
"function",
"checks",
"the",
"duplication",
"of",
"arguments",
"in",
"Symbol",
".",
"The",
"check",
"is",
"done",
"for",
"feedforward",
"net",
"for",
"now",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L68-L96 |
23,571 | apache/incubator-mxnet | python/mxnet/executor_manager.py | DataParallelExecutorGroup.forward | def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train) | python | def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train) | [
"def",
"forward",
"(",
"self",
",",
"is_train",
"=",
"False",
")",
":",
"for",
"texec",
"in",
"self",
".",
"train_execs",
":",
"texec",
".",
"forward",
"(",
"is_train",
"=",
"is_train",
")"
] | Perform a forward pass on each executor. | [
"Perform",
"a",
"forward",
"pass",
"on",
"each",
"executor",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L279-L282 |
23,572 | apache/incubator-mxnet | python/mxnet/executor_manager.py | DataParallelExecutorGroup.update_metric | def update_metric(self, metric, labels, pre_sliced=False):
"""Update evaluation metric with label and current outputs."""
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
if not pre_sliced:
labels_slice = [label[islice] for label in labels]
else:
labels_slice = labels[current_exec]
metric.update(labels_slice, texec.outputs) | python | def update_metric(self, metric, labels, pre_sliced=False):
"""Update evaluation metric with label and current outputs."""
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
if not pre_sliced:
labels_slice = [label[islice] for label in labels]
else:
labels_slice = labels[current_exec]
metric.update(labels_slice, texec.outputs) | [
"def",
"update_metric",
"(",
"self",
",",
"metric",
",",
"labels",
",",
"pre_sliced",
"=",
"False",
")",
":",
"for",
"current_exec",
",",
"(",
"texec",
",",
"islice",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"train_execs",
",",
"self",
".",
"slices",
")",
")",
":",
"if",
"not",
"pre_sliced",
":",
"labels_slice",
"=",
"[",
"label",
"[",
"islice",
"]",
"for",
"label",
"in",
"labels",
"]",
"else",
":",
"labels_slice",
"=",
"labels",
"[",
"current_exec",
"]",
"metric",
".",
"update",
"(",
"labels_slice",
",",
"texec",
".",
"outputs",
")"
] | Update evaluation metric with label and current outputs. | [
"Update",
"evaluation",
"metric",
"with",
"label",
"and",
"current",
"outputs",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L289-L296 |
23,573 | apache/incubator-mxnet | python/mxnet/executor_manager.py | DataParallelExecutorManager.install_monitor | def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec) | python | def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec) | [
"def",
"install_monitor",
"(",
"self",
",",
"monitor",
")",
":",
"if",
"self",
".",
"sym_gen",
"is",
"not",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Monitoring is not implemented for bucketing\"",
")",
"for",
"train_exec",
"in",
"self",
".",
"execgrp",
".",
"train_execs",
":",
"monitor",
".",
"install",
"(",
"train_exec",
")"
] | Install monitor on all executors. | [
"Install",
"monitor",
"on",
"all",
"executors",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L355-L361 |
23,574 | apache/incubator-mxnet | python/mxnet/executor_manager.py | DataParallelExecutorManager.set_params | def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params) | python | def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params) | [
"def",
"set_params",
"(",
"self",
",",
"arg_params",
",",
"aux_params",
")",
":",
"for",
"texec",
"in",
"self",
".",
"execgrp",
".",
"train_execs",
":",
"texec",
".",
"copy_params_from",
"(",
"arg_params",
",",
"aux_params",
")"
] | Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays. | [
"Set",
"parameter",
"and",
"aux",
"values",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L363-L375 |
23,575 | apache/incubator-mxnet | python/mxnet/executor_manager.py | DataParallelExecutorManager.update_metric | def update_metric(self, metric, labels, pre_sliced=False):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels, pre_sliced) | python | def update_metric(self, metric, labels, pre_sliced=False):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels, pre_sliced) | [
"def",
"update_metric",
"(",
"self",
",",
"metric",
",",
"labels",
",",
"pre_sliced",
"=",
"False",
")",
":",
"self",
".",
"curr_execgrp",
".",
"update_metric",
"(",
"metric",
",",
"labels",
",",
"pre_sliced",
")"
] | Update metric with the current executor. | [
"Update",
"metric",
"with",
"the",
"current",
"executor",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L442-L444 |
23,576 | apache/incubator-mxnet | example/reinforcement-learning/dqn/replay_memory.py | ReplayMemory.clear | def clear(self):
"""
Clear all contents in the replay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0 | python | def clear(self):
"""
Clear all contents in the replay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0 | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"states",
"[",
":",
"]",
"=",
"0",
"self",
".",
"actions",
"[",
":",
"]",
"=",
"0",
"self",
".",
"rewards",
"[",
":",
"]",
"=",
"0",
"self",
".",
"terminate_flags",
"[",
":",
"]",
"=",
"0",
"self",
".",
"top",
"=",
"0",
"self",
".",
"size",
"=",
"0"
] | Clear all contents in the replay memory | [
"Clear",
"all",
"contents",
"in",
"the",
"relay",
"memory"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/replay_memory.py#L63-L72 |
23,577 | apache/incubator-mxnet | cpp-package/scripts/lint.py | process | def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
# HACK: ignore op.h which is automatically generated
if fname.endswith('op.h'):
return
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname) | python | def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
# HACK: ignore op.h which is automatically generated
if fname.endswith('op.h'):
return
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname) | [
"def",
"process",
"(",
"fname",
",",
"allow_type",
")",
":",
"fname",
"=",
"str",
"(",
"fname",
")",
"# HACK: ignore op.h which is automatically generated",
"if",
"fname",
".",
"endswith",
"(",
"'op.h'",
")",
":",
"return",
"arr",
"=",
"fname",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"if",
"fname",
".",
"find",
"(",
"'#'",
")",
"!=",
"-",
"1",
"or",
"arr",
"[",
"-",
"1",
"]",
"not",
"in",
"allow_type",
":",
"return",
"if",
"arr",
"[",
"-",
"1",
"]",
"in",
"CXX_SUFFIX",
":",
"_HELPER",
".",
"process_cpp",
"(",
"fname",
",",
"arr",
"[",
"-",
"1",
"]",
")",
"if",
"arr",
"[",
"-",
"1",
"]",
"in",
"PYTHON_SUFFIX",
":",
"_HELPER",
".",
"process_python",
"(",
"fname",
")"
] | Process a file. | [
"Process",
"a",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L148-L160 |
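A sketch of driving process over a source tree; the root directory is a placeholder, and the suffix list is assumed to mirror the script's CXX_SUFFIX and PYTHON_SUFFIX sets.

import os

allow_type = ['h', 'cc', 'py']          # assumption: matches CXX_SUFFIX | PYTHON_SUFFIX
for root, _, files in os.walk('src'):   # 'src' is a placeholder root
    for fname in files:
        process(os.path.join(root, fname), allow_type)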
23,578 | apache/incubator-mxnet | cpp-package/scripts/lint.py | LintHelper._print_summary_map | def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass | python | def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass | [
"def",
"_print_summary_map",
"(",
"strm",
",",
"result_map",
",",
"ftype",
")",
":",
"if",
"len",
"(",
"result_map",
")",
"==",
"0",
":",
"return",
"0",
"npass",
"=",
"len",
"(",
"[",
"x",
"for",
"k",
",",
"x",
"in",
"result_map",
".",
"iteritems",
"(",
")",
"if",
"len",
"(",
"x",
")",
"==",
"0",
"]",
")",
"strm",
".",
"write",
"(",
"'=====%d/%d %s files passed check=====\\n'",
"%",
"(",
"npass",
",",
"len",
"(",
"result_map",
")",
",",
"ftype",
")",
")",
"for",
"fname",
",",
"emap",
"in",
"result_map",
".",
"iteritems",
"(",
")",
":",
"if",
"len",
"(",
"emap",
")",
"==",
"0",
":",
"continue",
"strm",
".",
"write",
"(",
"'%s: %d Errors of %d Categories map=%s\\n'",
"%",
"(",
"fname",
",",
"sum",
"(",
"emap",
".",
"values",
"(",
")",
")",
",",
"len",
"(",
"emap",
")",
",",
"str",
"(",
"emap",
")",
")",
")",
"return",
"len",
"(",
"result_map",
")",
"-",
"npass"
] | Print summary of certain result map. | [
"Print",
"summary",
"of",
"certain",
"result",
"map",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L40-L51 |
23,579 | apache/incubator-mxnet | cpp-package/scripts/lint.py | LintHelper.process_cpp | def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors | python | def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors | [
"def",
"process_cpp",
"(",
"self",
",",
"path",
",",
"suffix",
")",
":",
"_cpplint_state",
".",
"ResetErrorCounts",
"(",
")",
"cpplint",
".",
"ProcessFile",
"(",
"str",
"(",
"path",
")",
",",
"_cpplint_state",
".",
"verbose_level",
")",
"_cpplint_state",
".",
"PrintErrorCounts",
"(",
")",
"errors",
"=",
"_cpplint_state",
".",
"errors_by_category",
".",
"copy",
"(",
")",
"if",
"suffix",
"==",
"'h'",
":",
"self",
".",
"cpp_header_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"errors",
"else",
":",
"self",
".",
"cpp_src_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"errors"
] | Process a cpp file. | [
"Process",
"a",
"cpp",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L78-L88 |
23,580 | apache/incubator-mxnet | cpp-package/scripts/lint.py | LintHelper.process_python | def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print(pylint_stderr.read())
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap | python | def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print(pylint_stderr.read())
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap | [
"def",
"process_python",
"(",
"self",
",",
"path",
")",
":",
"(",
"pylint_stdout",
",",
"pylint_stderr",
")",
"=",
"epylint",
".",
"py_run",
"(",
"' '",
".",
"join",
"(",
"[",
"str",
"(",
"path",
")",
"]",
"+",
"self",
".",
"pylint_opts",
")",
",",
"return_std",
"=",
"True",
")",
"emap",
"=",
"{",
"}",
"print",
"(",
"pylint_stderr",
".",
"read",
"(",
")",
")",
"for",
"line",
"in",
"pylint_stdout",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"line",
")",
"key",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'('",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"key",
"not",
"in",
"self",
".",
"pylint_cats",
":",
"continue",
"if",
"key",
"not",
"in",
"emap",
":",
"emap",
"[",
"key",
"]",
"=",
"1",
"else",
":",
"emap",
"[",
"key",
"]",
"+=",
"1",
"sys",
".",
"stderr",
".",
"write",
"(",
"'\\n'",
")",
"self",
".",
"python_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"emap"
] | Process a python file. | [
"Process",
"a",
"python",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L90-L106 |
23,581 | apache/incubator-mxnet | cpp-package/scripts/lint.py | LintHelper.print_summary | def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr | python | def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr | [
"def",
"print_summary",
"(",
"self",
",",
"strm",
")",
":",
"nerr",
"=",
"0",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"cpp_header_map",
",",
"'cpp-header'",
")",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"cpp_src_map",
",",
"'cpp-soruce'",
")",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"python_map",
",",
"'python'",
")",
"if",
"nerr",
"==",
"0",
":",
"strm",
".",
"write",
"(",
"'All passed!\\n'",
")",
"else",
":",
"strm",
".",
"write",
"(",
"'%d files failed lint\\n'",
"%",
"nerr",
")",
"return",
"nerr"
] | Print summary of lint. | [
"Print",
"summary",
"of",
"lint",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L108-L118 |
23,582 | apache/incubator-mxnet | python/mxnet/kvstore_server.py | KVStoreServer._controller | def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason the code is here is that we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller | python | def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason the code is here is that we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller | [
"def",
"_controller",
"(",
"self",
")",
":",
"def",
"server_controller",
"(",
"cmd_id",
",",
"cmd_body",
",",
"_",
")",
":",
"\"\"\"Server controler.\"\"\"",
"if",
"not",
"self",
".",
"init_logginig",
":",
"# the reason put the codes here is because we cannot get",
"# kvstore.rank earlier",
"head",
"=",
"'%(asctime)-15s Server['",
"+",
"str",
"(",
"self",
".",
"kvstore",
".",
"rank",
")",
"+",
"'] %(message)s'",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"head",
")",
"self",
".",
"init_logginig",
"=",
"True",
"if",
"cmd_id",
"==",
"0",
":",
"try",
":",
"optimizer",
"=",
"pickle",
".",
"loads",
"(",
"cmd_body",
")",
"except",
":",
"raise",
"self",
".",
"kvstore",
".",
"set_optimizer",
"(",
"optimizer",
")",
"else",
":",
"print",
"(",
"\"server %d, unknown command (%d, %s)\"",
"%",
"(",
"self",
".",
"kvstore",
".",
"rank",
",",
"cmd_id",
",",
"cmd_body",
")",
")",
"return",
"server_controller"
] | Return the server controller. | [
"Return",
"the",
"server",
"controller",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L41-L62 |
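For context, a sketch of what the sending side would serialize for command id 0; the controller above simply unpickles cmd_body and installs the result via set_optimizer.

import pickle
import mxnet as mx

opt = mx.optimizer.SGD(learning_rate=0.01)
cmd_body = pickle.dumps(opt)        # the bytes the server receives as cmd_body
restored = pickle.loads(cmd_body)   # what the controller passes to set_optimizer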
23,583 | apache/incubator-mxnet | python/mxnet/kvstore_server.py | KVStoreServer.run | def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None)) | python | def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None)) | [
"def",
"run",
"(",
"self",
")",
":",
"_ctrl_proto",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"None",
",",
"ctypes",
".",
"c_int",
",",
"ctypes",
".",
"c_char_p",
",",
"ctypes",
".",
"c_void_p",
")",
"check_call",
"(",
"_LIB",
".",
"MXKVStoreRunServer",
"(",
"self",
".",
"handle",
",",
"_ctrl_proto",
"(",
"self",
".",
"_controller",
"(",
")",
")",
",",
"None",
")",
")"
] | Run the server, whose behavior is as follows.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x) | [
"Run",
"the",
"server",
"whose",
"behavior",
"is",
"like",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L64-L73 |
23,584 | apache/incubator-mxnet | python/mxnet/ndarray/register.py | _make_ndarray_function | def _make_ndarray_function(handle, name, func_name):
"""Create a NDArray function from the FunctionHandle."""
code, doc_str = _generate_ndarray_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
ndarray_function = local[func_name]
ndarray_function.__name__ = func_name
ndarray_function.__doc__ = doc_str
ndarray_function.__module__ = 'mxnet.ndarray'
return ndarray_function | python | def _make_ndarray_function(handle, name, func_name):
"""Create a NDArray function from the FunctionHandle."""
code, doc_str = _generate_ndarray_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
ndarray_function = local[func_name]
ndarray_function.__name__ = func_name
ndarray_function.__doc__ = doc_str
ndarray_function.__module__ = 'mxnet.ndarray'
return ndarray_function | [
"def",
"_make_ndarray_function",
"(",
"handle",
",",
"name",
",",
"func_name",
")",
":",
"code",
",",
"doc_str",
"=",
"_generate_ndarray_function_code",
"(",
"handle",
",",
"name",
",",
"func_name",
")",
"local",
"=",
"{",
"}",
"exec",
"(",
"code",
",",
"None",
",",
"local",
")",
"# pylint: disable=exec-used",
"ndarray_function",
"=",
"local",
"[",
"func_name",
"]",
"ndarray_function",
".",
"__name__",
"=",
"func_name",
"ndarray_function",
".",
"__doc__",
"=",
"doc_str",
"ndarray_function",
".",
"__module__",
"=",
"'mxnet.ndarray'",
"return",
"ndarray_function"
] | Create a NDArray function from the FunctionHandle. | [
"Create",
"a",
"NDArray",
"function",
"from",
"the",
"FunctionHandle",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/register.py#L158-L168 |
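The exec-based factory pattern in isolation, as a minimal sketch independent of the MXNet code generator:

code = (
    "def scaled_add(a, b, scale=1.0):\n"
    "    return (a + b) * scale\n"
)
local = {}
exec(code, None, local)             # compile the generated source into `local`
fn = local['scaled_add']
fn.__doc__ = 'Add two values and scale the result.'
print(fn(1, 2, scale=2.0))          # 6.0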
23,585 | apache/incubator-mxnet | python/mxnet/contrib/text/utils.py | count_tokens_from_str | def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n',
to_lower=False, counter_to_update=None):
"""Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert source_str to lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(source_str, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(source_str, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})
"""
source_str = filter(None,
re.split(token_delim + '|' + seq_delim, source_str))
if to_lower:
source_str = [t.lower() for t in source_str]
if counter_to_update is None:
return collections.Counter(source_str)
else:
counter_to_update.update(source_str)
return counter_to_update | python | def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n',
to_lower=False, counter_to_update=None):
"""Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert source_str to lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(source_str, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(source_str, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})
"""
source_str = filter(None,
re.split(token_delim + '|' + seq_delim, source_str))
if to_lower:
source_str = [t.lower() for t in source_str]
if counter_to_update is None:
return collections.Counter(source_str)
else:
counter_to_update.update(source_str)
return counter_to_update | [
"def",
"count_tokens_from_str",
"(",
"source_str",
",",
"token_delim",
"=",
"' '",
",",
"seq_delim",
"=",
"'\\n'",
",",
"to_lower",
"=",
"False",
",",
"counter_to_update",
"=",
"None",
")",
":",
"source_str",
"=",
"filter",
"(",
"None",
",",
"re",
".",
"split",
"(",
"token_delim",
"+",
"'|'",
"+",
"seq_delim",
",",
"source_str",
")",
")",
"if",
"to_lower",
":",
"source_str",
"=",
"[",
"t",
".",
"lower",
"(",
")",
"for",
"t",
"in",
"source_str",
"]",
"if",
"counter_to_update",
"is",
"None",
":",
"return",
"collections",
".",
"Counter",
"(",
"source_str",
")",
"else",
":",
"counter_to_update",
".",
"update",
"(",
"source_str",
")",
"return",
"counter_to_update"
] | Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert source_str to lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(source_str, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(source_str, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1}) | [
"Counts",
"tokens",
"in",
"the",
"specified",
"string",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/text/utils.py#L28-L85 |
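The counter_to_update path accumulates counts across calls, for example:

import collections
from mxnet.contrib.text.utils import count_tokens_from_str

counter = collections.Counter()
count_tokens_from_str('a b a\n', ' ', '\n', counter_to_update=counter)
count_tokens_from_str('b c\n', ' ', '\n', counter_to_update=counter)
print(counter)  # Counter({'a': 2, 'b': 2, 'c': 1})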
23,586 | apache/incubator-mxnet | python/mxnet/ndarray/utils.py | load | def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) | python | def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) | [
"def",
"load",
"(",
"fname",
")",
":",
"if",
"not",
"isinstance",
"(",
"fname",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'fname required to be a string'",
")",
"out_size",
"=",
"mx_uint",
"(",
")",
"out_name_size",
"=",
"mx_uint",
"(",
")",
"handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayLoad",
"(",
"c_str",
"(",
"fname",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_name_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"names",
")",
")",
")",
"if",
"out_name_size",
".",
"value",
"==",
"0",
":",
"return",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
"]",
"else",
":",
"assert",
"out_name_size",
".",
"value",
"==",
"out_size",
".",
"value",
"return",
"dict",
"(",
"(",
"py_str",
"(",
"names",
"[",
"i",
"]",
")",
",",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
")"
] | Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data. | [
"Loads",
"an",
"array",
"from",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L149-L182 |
23,587 | apache/incubator-mxnet | python/mxnet/ndarray/utils.py | load_frombuffer | def load_frombuffer(buf):
"""Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(buf, string_types + tuple([bytes])):
raise TypeError('buf required to be a string or bytes')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
mx_uint(len(buf)),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) | python | def load_frombuffer(buf):
"""Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(buf, string_types + tuple([bytes])):
raise TypeError('buf required to be a string or bytes')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
mx_uint(len(buf)),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) | [
"def",
"load_frombuffer",
"(",
"buf",
")",
":",
"if",
"not",
"isinstance",
"(",
"buf",
",",
"string_types",
"+",
"tuple",
"(",
"[",
"bytes",
"]",
")",
")",
":",
"raise",
"TypeError",
"(",
"'buf required to be a string or bytes'",
")",
"out_size",
"=",
"mx_uint",
"(",
")",
"out_name_size",
"=",
"mx_uint",
"(",
")",
"handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayLoadFromBuffer",
"(",
"buf",
",",
"mx_uint",
"(",
"len",
"(",
"buf",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_name_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"names",
")",
")",
")",
"if",
"out_name_size",
".",
"value",
"==",
"0",
":",
"return",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
"]",
"else",
":",
"assert",
"out_name_size",
".",
"value",
"==",
"out_size",
".",
"value",
"return",
"dict",
"(",
"(",
"py_str",
"(",
"names",
"[",
"i",
"]",
")",
",",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
")"
] | Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data. | [
"Loads",
"an",
"array",
"dictionary",
"or",
"list",
"from",
"a",
"buffer"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L185-L219 |
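A short sketch of `load_frombuffer` under the same assumptions (the file name is illustrative and is created here only to obtain raw bytes):

import mxnet as mx

mx.nd.save('arrays.params', [mx.nd.ones((1, 4))])  # illustrative file
with open('arrays.params', 'rb') as f:
    buf = f.read()                                 # raw bytes of the file
arrays = mx.nd.load_frombuffer(buf)                # list, since no names were saved
print(arrays[0].shape)                             # (1, 4)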
23,588 | apache/incubator-mxnet | python/mxnet/ndarray/utils.py | save | def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
- ``hdfs://path/to/file`` (if compiled with HDFS support)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys)) | python | def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
- ``hdfs://path/to/file`` (if compiled with HDFS support)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys)) | [
"def",
"save",
"(",
"fname",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"NDArray",
")",
":",
"data",
"=",
"[",
"data",
"]",
"handles",
"=",
"c_array",
"(",
"NDArrayHandle",
",",
"[",
"]",
")",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"str_keys",
"=",
"data",
".",
"keys",
"(",
")",
"nd_vals",
"=",
"data",
".",
"values",
"(",
")",
"if",
"any",
"(",
"not",
"isinstance",
"(",
"k",
",",
"string_types",
")",
"for",
"k",
"in",
"str_keys",
")",
"or",
"any",
"(",
"not",
"isinstance",
"(",
"v",
",",
"NDArray",
")",
"for",
"v",
"in",
"nd_vals",
")",
":",
"raise",
"TypeError",
"(",
"'save only accept dict str->NDArray or list of NDArray'",
")",
"keys",
"=",
"c_str_array",
"(",
"str_keys",
")",
"handles",
"=",
"c_handle_array",
"(",
"nd_vals",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"if",
"any",
"(",
"not",
"isinstance",
"(",
"v",
",",
"NDArray",
")",
"for",
"v",
"in",
"data",
")",
":",
"raise",
"TypeError",
"(",
"'save only accept dict str->NDArray or list of NDArray'",
")",
"keys",
"=",
"None",
"handles",
"=",
"c_handle_array",
"(",
"data",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"data needs to either be a NDArray, dict of str, NDArray pairs \"",
"\"or a list of NDarrays.\"",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySave",
"(",
"c_str",
"(",
"fname",
")",
",",
"mx_uint",
"(",
"len",
"(",
"handles",
")",
")",
",",
"handles",
",",
"keys",
")",
")"
] | Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
- ``hdfs://path/to/file`` (if compiled with HDFS support)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>} | [
"Saves",
"a",
"list",
"of",
"arrays",
"or",
"a",
"dict",
"of",
"str",
"-",
">",
"array",
"to",
"file",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L222-L273 |
23,589 | apache/incubator-mxnet | python/mxnet/gluon/block.py | _common_prefix | def _common_prefix(names):
"""Get the common prefix for all names"""
if not names:
return ''
prefix = names[0]
for name in names:
i = 0
while i < len(prefix) and i < len(name) and prefix[i] == name[i]:
i += 1
prefix = prefix[:i]
return prefix | python | def _common_prefix(names):
"""Get the common prefix for all names"""
if not names:
return ''
prefix = names[0]
for name in names:
i = 0
while i < len(prefix) and i < len(name) and prefix[i] == name[i]:
i += 1
prefix = prefix[:i]
return prefix | [
"def",
"_common_prefix",
"(",
"names",
")",
":",
"if",
"not",
"names",
":",
"return",
"''",
"prefix",
"=",
"names",
"[",
"0",
"]",
"for",
"name",
"in",
"names",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"prefix",
")",
"and",
"i",
"<",
"len",
"(",
"name",
")",
"and",
"prefix",
"[",
"i",
"]",
"==",
"name",
"[",
"i",
"]",
":",
"i",
"+=",
"1",
"prefix",
"=",
"prefix",
"[",
":",
"i",
"]",
"return",
"prefix"
] | Get the common prefix for all names | [
"Get",
"the",
"common",
"prefix",
"for",
"all",
"names"
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L939-L949 |
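Illustrative calls showing the helper's behavior (a sketch; `_common_prefix` is a private module-level function in mxnet.gluon.block):

from mxnet.gluon.block import _common_prefix

print(_common_prefix(['dense0_weight', 'dense0_bias']))  # 'dense0_'
print(_common_prefix(['relu0_', 'dense1_']))             # '' (no shared prefix)
print(_common_prefix([]))                                # ''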
23,590 | apache/incubator-mxnet | python/mxnet/gluon/block.py | _infer_param_types | def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t):
"""Utility function that helps in inferring DType of args and auxs params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parameters.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type.
"""
arg_types = None
aux_types = None
# Get Input symbol details. This will be used to infer types of
# other parameters.
input_sym_names = [in_param.name for in_param in in_params]
# Try to infer input types. If not successful, we will set default dtype.
# If successful, we will try to infer other params in the graph.
input_sym_arg_types = []
can_infer_input_type = True
for in_param in in_params:
input_sym_arg_type = in_param.infer_type()[0]
if not input_sym_arg_type or len(input_sym_arg_type) < 1:
can_infer_input_type = False
break
else:
input_sym_arg_types.append(in_param.infer_type()[0][0])
# Try to infer types of other parameters.
if can_infer_input_type:
params = {k:v for k, v in zip(input_sym_names, input_sym_arg_types)}
arg_types, _, aux_types = out_params.infer_type(**params)
if arg_types is None or len(arg_types) != len(arg_params):
arg_types = []
for _ in arg_params:
arg_types.append(default_dtype)
if aux_types is None or len(aux_types) != len(aux_params):
aux_types = []
for _ in aux_params:
aux_types.append(default_dtype)
return (arg_types, aux_types) | python | def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t):
"""Utility function that helps in inferring DType of args and auxs params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parameters.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type.
"""
arg_types = None
aux_types = None
# Get Input symbol details. This will be used to infer types of
# other parameters.
input_sym_names = [in_param.name for in_param in in_params]
# Try to infer input types. If not successful, we will set default dtype.
# If successful, we will try to infer other params in the graph.
input_sym_arg_types = []
can_infer_input_type = True
for in_param in in_params:
input_sym_arg_type = in_param.infer_type()[0]
if not input_sym_arg_type or len(input_sym_arg_type) < 1:
can_infer_input_type = False
break
else:
input_sym_arg_types.append(in_param.infer_type()[0][0])
# Try to infer types of other parameters.
if can_infer_input_type:
params = {k:v for k, v in zip(input_sym_names, input_sym_arg_types)}
arg_types, _, aux_types = out_params.infer_type(**params)
if arg_types is None or len(arg_types) != len(arg_params):
arg_types = []
for _ in arg_params:
arg_types.append(default_dtype)
if aux_types is None or len(aux_types) != len(aux_params):
aux_types = []
for _ in aux_params:
aux_types.append(default_dtype)
return (arg_types, aux_types) | [
"def",
"_infer_param_types",
"(",
"in_params",
",",
"out_params",
",",
"arg_params",
",",
"aux_params",
",",
"default_dtype",
"=",
"mx_real_t",
")",
":",
"arg_types",
"=",
"None",
"aux_types",
"=",
"None",
"# Get Input symbol details. This will be used to infer types of",
"# other parameters.",
"input_sym_names",
"=",
"[",
"in_param",
".",
"name",
"for",
"in_param",
"in",
"in_params",
"]",
"# Try to infer input types. If not successful, we will set default dtype.",
"# If successful, we will try to infer other params in the graph.",
"input_sym_arg_types",
"=",
"[",
"]",
"can_infer_input_type",
"=",
"True",
"for",
"in_param",
"in",
"in_params",
":",
"input_sym_arg_type",
"=",
"in_param",
".",
"infer_type",
"(",
")",
"[",
"0",
"]",
"if",
"not",
"input_sym_arg_type",
"or",
"len",
"(",
"input_sym_arg_type",
")",
"<",
"1",
":",
"can_infer_input_type",
"=",
"False",
"break",
"else",
":",
"input_sym_arg_types",
".",
"append",
"(",
"in_param",
".",
"infer_type",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"# Try to infer types of other parameters.",
"if",
"can_infer_input_type",
":",
"params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"input_sym_names",
",",
"input_sym_arg_types",
")",
"}",
"arg_types",
",",
"_",
",",
"aux_types",
"=",
"out_params",
".",
"infer_type",
"(",
"*",
"*",
"params",
")",
"if",
"arg_types",
"is",
"None",
"or",
"len",
"(",
"arg_types",
")",
"!=",
"len",
"(",
"arg_params",
")",
":",
"arg_types",
"=",
"[",
"]",
"for",
"_",
"in",
"arg_params",
":",
"arg_types",
".",
"append",
"(",
"default_dtype",
")",
"if",
"aux_types",
"is",
"None",
"or",
"len",
"(",
"aux_types",
")",
"!=",
"len",
"(",
"aux_params",
")",
":",
"aux_types",
"=",
"[",
"]",
"for",
"_",
"in",
"aux_params",
":",
"aux_types",
".",
"append",
"(",
"default_dtype",
")",
"return",
"(",
"arg_types",
",",
"aux_types",
")"
] | Utility function that helps in inferring DType of args and aux params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parameters.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type. | [
"Utility",
"function",
"that",
"helps",
"in",
"inferring",
"DType",
"of",
"args",
"and",
"auxs",
"params",
"from",
"given",
"input",
"param",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L1108-L1168 |
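A sketch of how this helper behaves, mirroring the way `SymbolBlock` calls it; the symbol names and the float16 input dtype are assumptions for illustration:

import mxnet as mx
from mxnet.gluon.block import _infer_param_types  # private helper

data = mx.sym.var('data', dtype='float16')
out = mx.sym.FullyConnected(data=data, num_hidden=4, name='fc')
arg_params = out.list_arguments()         # ['data', 'fc_weight', 'fc_bias']
aux_params = out.list_auxiliary_states()  # []
arg_types, aux_types = _infer_param_types([data], out, arg_params, aux_params)
# arg_types is [float16, float16, float16] when inference succeeds; on failure
# every entry falls back to default_dtype (float32)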
23,591 | apache/incubator-mxnet | python/mxnet/gluon/block.py | _BlockScope.create | def create(prefix, params, hint):
"""Creates prefix and params for new `Block`."""
current = getattr(_BlockScope._current, "value", None)
if current is None:
if prefix is None:
if not hasattr(_name.NameManager._current, "value"):
_name.NameManager._current.value = _name.NameManager()
prefix = _name.NameManager._current.value.get(None, hint) + '_'
if params is None:
params = ParameterDict(prefix)
else:
params = ParameterDict(params.prefix, params)
return prefix, params
if prefix is None:
count = current._counter.get(hint, 0)
prefix = '%s%d_'%(hint, count)
current._counter[hint] = count + 1
if params is None:
parent = current._block.params
params = ParameterDict(parent.prefix+prefix, parent._shared)
else:
params = ParameterDict(params.prefix, params)
return current._block.prefix+prefix, params | python | def create(prefix, params, hint):
"""Creates prefix and params for new `Block`."""
current = getattr(_BlockScope._current, "value", None)
if current is None:
if prefix is None:
if not hasattr(_name.NameManager._current, "value"):
_name.NameManager._current.value = _name.NameManager()
prefix = _name.NameManager._current.value.get(None, hint) + '_'
if params is None:
params = ParameterDict(prefix)
else:
params = ParameterDict(params.prefix, params)
return prefix, params
if prefix is None:
count = current._counter.get(hint, 0)
prefix = '%s%d_'%(hint, count)
current._counter[hint] = count + 1
if params is None:
parent = current._block.params
params = ParameterDict(parent.prefix+prefix, parent._shared)
else:
params = ParameterDict(params.prefix, params)
return current._block.prefix+prefix, params | [
"def",
"create",
"(",
"prefix",
",",
"params",
",",
"hint",
")",
":",
"current",
"=",
"getattr",
"(",
"_BlockScope",
".",
"_current",
",",
"\"value\"",
",",
"None",
")",
"if",
"current",
"is",
"None",
":",
"if",
"prefix",
"is",
"None",
":",
"if",
"not",
"hasattr",
"(",
"_name",
".",
"NameManager",
".",
"_current",
",",
"\"value\"",
")",
":",
"_name",
".",
"NameManager",
".",
"_current",
".",
"value",
"=",
"_name",
".",
"NameManager",
"(",
")",
"prefix",
"=",
"_name",
".",
"NameManager",
".",
"_current",
".",
"value",
".",
"get",
"(",
"None",
",",
"hint",
")",
"+",
"'_'",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"ParameterDict",
"(",
"prefix",
")",
"else",
":",
"params",
"=",
"ParameterDict",
"(",
"params",
".",
"prefix",
",",
"params",
")",
"return",
"prefix",
",",
"params",
"if",
"prefix",
"is",
"None",
":",
"count",
"=",
"current",
".",
"_counter",
".",
"get",
"(",
"hint",
",",
"0",
")",
"prefix",
"=",
"'%s%d_'",
"%",
"(",
"hint",
",",
"count",
")",
"current",
".",
"_counter",
"[",
"hint",
"]",
"=",
"count",
"+",
"1",
"if",
"params",
"is",
"None",
":",
"parent",
"=",
"current",
".",
"_block",
".",
"params",
"params",
"=",
"ParameterDict",
"(",
"parent",
".",
"prefix",
"+",
"prefix",
",",
"parent",
".",
"_shared",
")",
"else",
":",
"params",
"=",
"ParameterDict",
"(",
"params",
".",
"prefix",
",",
"params",
")",
"return",
"current",
".",
"_block",
".",
"prefix",
"+",
"prefix",
",",
"params"
] | Creates prefix and params for new `Block`. | [
"Creates",
"prefix",
"and",
"params",
"for",
"new",
"Block",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L49-L72 |
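The observable effect of this scope machinery: a Block created without an explicit prefix gets an auto-generated one. A minimal illustration (the exact counter value depends on what was created before):

from mxnet import gluon

net = gluon.nn.Dense(10)  # no prefix passed in
print(net.prefix)         # e.g. 'dense0_', produced via _BlockScope.create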
23,592 | apache/incubator-mxnet | python/mxnet/gluon/block.py | Block.load_parameters | def load_parameters(self, filename, ctx=None, allow_missing=False,
ignore_extra=False):
"""Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not present in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
if not loaded and not params:
return
if not any('.' in i for i in loaded.keys()):
# legacy loading
del loaded
self.collect_params().load(
filename, ctx, allow_missing, ignore_extra, self.prefix)
return
if not allow_missing:
for name in params.keys():
assert name in loaded, \
"Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
"Set allow_missing=True to ignore missing parameters."%(
name, filename, _brief_print_list(loaded.keys()))
for name in loaded:
if not ignore_extra and name not in params:
raise ValueError(
"Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
"which contains parameters %s. Set ignore_extra=True to ignore. "%(
name, filename, _brief_print_list(self._params.keys())))
if name in params:
params[name]._load_init(loaded[name], ctx) | python | def load_parameters(self, filename, ctx=None, allow_missing=False,
ignore_extra=False):
"""Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not present in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
if not loaded and not params:
return
if not any('.' in i for i in loaded.keys()):
# legacy loading
del loaded
self.collect_params().load(
filename, ctx, allow_missing, ignore_extra, self.prefix)
return
if not allow_missing:
for name in params.keys():
assert name in loaded, \
"Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
"Set allow_missing=True to ignore missing parameters."%(
name, filename, _brief_print_list(loaded.keys()))
for name in loaded:
if not ignore_extra and name not in params:
raise ValueError(
"Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
"which contains parameters %s. Set ignore_extra=True to ignore. "%(
name, filename, _brief_print_list(self._params.keys())))
if name in params:
params[name]._load_init(loaded[name], ctx) | [
"def",
"load_parameters",
"(",
"self",
",",
"filename",
",",
"ctx",
"=",
"None",
",",
"allow_missing",
"=",
"False",
",",
"ignore_extra",
"=",
"False",
")",
":",
"loaded",
"=",
"ndarray",
".",
"load",
"(",
"filename",
")",
"params",
"=",
"self",
".",
"_collect_params_with_prefix",
"(",
")",
"if",
"not",
"loaded",
"and",
"not",
"params",
":",
"return",
"if",
"not",
"any",
"(",
"'.'",
"in",
"i",
"for",
"i",
"in",
"loaded",
".",
"keys",
"(",
")",
")",
":",
"# legacy loading",
"del",
"loaded",
"self",
".",
"collect_params",
"(",
")",
".",
"load",
"(",
"filename",
",",
"ctx",
",",
"allow_missing",
",",
"ignore_extra",
",",
"self",
".",
"prefix",
")",
"return",
"if",
"not",
"allow_missing",
":",
"for",
"name",
"in",
"params",
".",
"keys",
"(",
")",
":",
"assert",
"name",
"in",
"loaded",
",",
"\"Parameter '%s' is missing in file '%s', which contains parameters: %s. \"",
"\"Set allow_missing=True to ignore missing parameters.\"",
"%",
"(",
"name",
",",
"filename",
",",
"_brief_print_list",
"(",
"loaded",
".",
"keys",
"(",
")",
")",
")",
"for",
"name",
"in",
"loaded",
":",
"if",
"not",
"ignore_extra",
"and",
"name",
"not",
"in",
"params",
":",
"raise",
"ValueError",
"(",
"\"Parameter '%s' loaded from file '%s' is not present in ParameterDict, \"",
"\"which contains parameters %s. Set ignore_extra=True to ignore. \"",
"%",
"(",
"name",
",",
"filename",
",",
"_brief_print_list",
"(",
"self",
".",
"_params",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"name",
"in",
"params",
":",
"params",
"[",
"name",
"]",
".",
"_load_init",
"(",
"loaded",
"[",
"name",
"]",
",",
"ctx",
")"
] | Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not present in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_ | [
"Load",
"parameters",
"from",
"file",
"previously",
"saved",
"by",
"save_parameters",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L356-L402 |
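A round-trip sketch for `load_parameters`; the file name is illustrative, and `in_units=5` is fixed so the parameter shapes are known without a forward pass:

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(10, in_units=5)
net.initialize()
net.save_parameters('dense.params')    # counterpart saver
net2 = gluon.nn.Dense(10, in_units=5)  # same architecture
net2.load_parameters('dense.params', ctx=mx.cpu())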
23,593 | apache/incubator-mxnet | python/mxnet/gluon/block.py | Block.register_forward_pre_hook | def register_forward_pre_hook(self, hook):
r"""Registers a forward pre-hook on the block.
The hook function is called immediately before :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle`
"""
handle = HookHandle()
handle.attach(self._forward_pre_hooks, hook)
return handle | python | def register_forward_pre_hook(self, hook):
r"""Registers a forward pre-hook on the block.
The hook function is called immediately before :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle`
"""
handle = HookHandle()
handle.attach(self._forward_pre_hooks, hook)
return handle | [
"def",
"register_forward_pre_hook",
"(",
"self",
",",
"hook",
")",
":",
"handle",
"=",
"HookHandle",
"(",
")",
"handle",
".",
"attach",
"(",
"self",
".",
"_forward_pre_hooks",
",",
"hook",
")",
"return",
"handle"
] | r"""Registers a forward pre-hook on the block.
The hook function is called immediately before :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle` | [
"r",
"Registers",
"a",
"forward",
"pre",
"-",
"hook",
"on",
"the",
"block",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L430-L447 |
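A minimal sketch of a pre-hook; the hook receives the block and the tuple of positional inputs, and the returned handle can detach it later:

from mxnet import gluon, nd

def pre_hook(block, inputs):
    print('entering', block.name)      # observe only; do not modify inputs

net = gluon.nn.Dense(10, in_units=5)
net.initialize()
handle = net.register_forward_pre_hook(pre_hook)
net(nd.ones((1, 5)))                   # prints 'entering dense0' (name may vary)
handle.detach()                        # remove the hook when done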
23,594 | apache/incubator-mxnet | python/mxnet/gluon/block.py | Block.register_forward_hook | def register_forward_hook(self, hook):
r"""Registers a forward hook on the block.
The hook function is called immediately after :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input, output) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle`
"""
handle = HookHandle()
handle.attach(self._forward_hooks, hook)
return handle | python | def register_forward_hook(self, hook):
r"""Registers a forward hook on the block.
The hook function is called immediately after :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input, output) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle`
"""
handle = HookHandle()
handle.attach(self._forward_hooks, hook)
return handle | [
"def",
"register_forward_hook",
"(",
"self",
",",
"hook",
")",
":",
"handle",
"=",
"HookHandle",
"(",
")",
"handle",
".",
"attach",
"(",
"self",
".",
"_forward_hooks",
",",
"hook",
")",
"return",
"handle"
] | r"""Registers a forward hook on the block.
The hook function is called immediately after :func:`forward`.
It should not modify the input or output.
Parameters
----------
hook : callable
The forward hook function of form `hook(block, input, output) -> None`.
Returns
-------
:class:`mxnet.gluon.utils.HookHandle` | [
"r",
"Registers",
"a",
"forward",
"hook",
"on",
"the",
"block",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L449-L466 |
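The post-forward counterpart; the hook additionally receives the output of `forward`:

from mxnet import gluon, nd

def fwd_hook(block, inputs, outputs):
    print(block.name, '->', outputs.shape)  # observe only

net = gluon.nn.Dense(10, in_units=5)
net.initialize()
handle = net.register_forward_hook(fwd_hook)
net(nd.ones((1, 5)))                        # prints e.g. 'dense0 -> (1, 10)'
handle.detach()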
23,595 | apache/incubator-mxnet | python/mxnet/gluon/block.py | Block.apply | def apply(self, fn):
r"""Applies ``fn`` recursively to every child block as well as self.
Parameters
----------
fn : callable
Function to be applied to each submodule, of form `fn(block)`.
Returns
-------
this block
"""
for cld in self._children.values():
cld.apply(fn)
fn(self)
return self | python | def apply(self, fn):
r"""Applies ``fn`` recursively to every child block as well as self.
Parameters
----------
fn : callable
Function to be applied to each submodule, of form `fn(block)`.
Returns
-------
this block
"""
for cld in self._children.values():
cld.apply(fn)
fn(self)
return self | [
"def",
"apply",
"(",
"self",
",",
"fn",
")",
":",
"for",
"cld",
"in",
"self",
".",
"_children",
".",
"values",
"(",
")",
":",
"cld",
".",
"apply",
"(",
"fn",
")",
"fn",
"(",
"self",
")",
"return",
"self"
] | r"""Applies ``fn`` recursively to every child block as well as self.
Parameters
----------
fn : callable
Function to be applied to each submodule, of form `fn(block)`.
Returns
-------
this block | [
"r",
"Applies",
"fn",
"recursively",
"to",
"every",
"child",
"block",
"as",
"well",
"as",
"self",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L468-L483 |
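A quick illustration: because children are visited before `self`, this prints the two Dense blocks first and the container last:

from mxnet import gluon

net = gluon.nn.Sequential()
net.add(gluon.nn.Dense(10), gluon.nn.Dense(2))
net.apply(lambda block: print(block.name))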
23,596 | apache/incubator-mxnet | python/mxnet/gluon/block.py | Block.cast | def cast(self, dtype):
"""Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
for child in self._children.values():
child.cast(dtype)
for _, param in self.params.items():
param.cast(dtype) | python | def cast(self, dtype):
"""Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type.
"""
for child in self._children.values():
child.cast(dtype)
for _, param in self.params.items():
param.cast(dtype) | [
"def",
"cast",
"(",
"self",
",",
"dtype",
")",
":",
"for",
"child",
"in",
"self",
".",
"_children",
".",
"values",
"(",
")",
":",
"child",
".",
"cast",
"(",
"dtype",
")",
"for",
"_",
",",
"param",
"in",
"self",
".",
"params",
".",
"items",
"(",
")",
":",
"param",
".",
"cast",
"(",
"dtype",
")"
] | Cast this Block to use another data type.
Parameters
----------
dtype : str or numpy.dtype
The new data type. | [
"Cast",
"this",
"Block",
"to",
"use",
"another",
"data",
"type",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L522-L533 |
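A short sketch; `in_units=5` is fixed so the weight exists immediately after `initialize`:

from mxnet import gluon

net = gluon.nn.Dense(10, in_units=5)
net.initialize()
net.cast('float16')               # recursively casts children and parameters
print(net.weight.data().dtype)    # numpy.float16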
23,597 | apache/incubator-mxnet | python/mxnet/gluon/block.py | HybridBlock._infer_attrs | def _infer_attrs(self, infer_fn, attr, *args):
"""Generic infer attributes."""
inputs, out = self._get_graph(*args)
args, _ = _flatten(args, "input")
with warnings.catch_warnings(record=True) as w:
arg_attrs, _, aux_attrs = getattr(out, infer_fn)(
**{i.name: getattr(j, attr) for i, j in zip(inputs, args)})
if arg_attrs is None:
raise ValueError(w[0].message)
sdict = {i: j for i, j in zip(out.list_arguments(), arg_attrs)}
sdict.update({name : attr for name, attr in \
zip(out.list_auxiliary_states(), aux_attrs)})
for i in self.collect_params().values():
setattr(i, attr, sdict[i.name]) | python | def _infer_attrs(self, infer_fn, attr, *args):
"""Generic infer attributes."""
inputs, out = self._get_graph(*args)
args, _ = _flatten(args, "input")
with warnings.catch_warnings(record=True) as w:
arg_attrs, _, aux_attrs = getattr(out, infer_fn)(
**{i.name: getattr(j, attr) for i, j in zip(inputs, args)})
if arg_attrs is None:
raise ValueError(w[0].message)
sdict = {i: j for i, j in zip(out.list_arguments(), arg_attrs)}
sdict.update({name : attr for name, attr in \
zip(out.list_auxiliary_states(), aux_attrs)})
for i in self.collect_params().values():
setattr(i, attr, sdict[i.name]) | [
"def",
"_infer_attrs",
"(",
"self",
",",
"infer_fn",
",",
"attr",
",",
"*",
"args",
")",
":",
"inputs",
",",
"out",
"=",
"self",
".",
"_get_graph",
"(",
"*",
"args",
")",
"args",
",",
"_",
"=",
"_flatten",
"(",
"args",
",",
"\"input\"",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
"record",
"=",
"True",
")",
"as",
"w",
":",
"arg_attrs",
",",
"_",
",",
"aux_attrs",
"=",
"getattr",
"(",
"out",
",",
"infer_fn",
")",
"(",
"*",
"*",
"{",
"i",
".",
"name",
":",
"getattr",
"(",
"j",
",",
"attr",
")",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"inputs",
",",
"args",
")",
"}",
")",
"if",
"arg_attrs",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"w",
"[",
"0",
"]",
".",
"message",
")",
"sdict",
"=",
"{",
"i",
":",
"j",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"out",
".",
"list_arguments",
"(",
")",
",",
"arg_attrs",
")",
"}",
"sdict",
".",
"update",
"(",
"{",
"name",
":",
"attr",
"for",
"name",
",",
"attr",
"in",
"zip",
"(",
"out",
".",
"list_auxiliary_states",
"(",
")",
",",
"aux_attrs",
")",
"}",
")",
"for",
"i",
"in",
"self",
".",
"collect_params",
"(",
")",
".",
"values",
"(",
")",
":",
"setattr",
"(",
"i",
",",
"attr",
",",
"sdict",
"[",
"i",
".",
"name",
"]",
")"
] | Generic infer attributes. | [
"Generic",
"infer",
"attributes",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L845-L858 |
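This private method is the generic driver behind `HybridBlock.infer_shape` and `HybridBlock.infer_type`, which pass ('infer_shape', 'shape') and ('infer_type', 'dtype') respectively. A sketch through the public wrapper:

import mxnet as mx
from mxnet import gluon

net = gluon.nn.Dense(10)   # in_units left unknown on purpose
x = mx.nd.ones((4, 5))
net.infer_shape(x)         # delegates to _infer_attrs('infer_shape', 'shape', x)
print(net.weight.shape)    # (10, 5) after inference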
23,598 | apache/incubator-mxnet | python/mxnet/gluon/block.py | HybridBlock.export | def export(self, path, epoch=0):
"""Export HybridBlock to json format that can be loaded by
`SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.
.. note:: When there is only one input, it will have the name `data`. When there
are multiple inputs, they will be named `data0`, `data1`, etc.
Parameters
----------
path : str
Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
will be created, where xxxx is the 4-digit epoch number.
epoch : int
Epoch number of saved model.
"""
if not self._cached_graph:
raise RuntimeError(
"Please first call block.hybridize() and then run forward with "
"this block at least once before calling export.")
sym = self._cached_graph[1]
sym.save('%s-symbol.json'%path)
arg_names = set(sym.list_arguments())
aux_names = set(sym.list_auxiliary_states())
arg_dict = {}
for name, param in self.collect_params().items():
if name in arg_names:
arg_dict['arg:%s'%name] = param._reduce()
else:
assert name in aux_names
arg_dict['aux:%s'%name] = param._reduce()
ndarray.save('%s-%04d.params'%(path, epoch), arg_dict) | python | def export(self, path, epoch=0):
"""Export HybridBlock to json format that can be loaded by
`SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.
.. note:: When there is only one input, it will have the name `data`. When there
are multiple inputs, they will be named `data0`, `data1`, etc.
Parameters
----------
path : str
Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
will be created, where xxxx is the 4-digit epoch number.
epoch : int
Epoch number of saved model.
"""
if not self._cached_graph:
raise RuntimeError(
"Please first call block.hybridize() and then run forward with "
"this block at least once before calling export.")
sym = self._cached_graph[1]
sym.save('%s-symbol.json'%path)
arg_names = set(sym.list_arguments())
aux_names = set(sym.list_auxiliary_states())
arg_dict = {}
for name, param in self.collect_params().items():
if name in arg_names:
arg_dict['arg:%s'%name] = param._reduce()
else:
assert name in aux_names
arg_dict['aux:%s'%name] = param._reduce()
ndarray.save('%s-%04d.params'%(path, epoch), arg_dict) | [
"def",
"export",
"(",
"self",
",",
"path",
",",
"epoch",
"=",
"0",
")",
":",
"if",
"not",
"self",
".",
"_cached_graph",
":",
"raise",
"RuntimeError",
"(",
"\"Please first call block.hybridize() and then run forward with \"",
"\"this block at least once before calling export.\"",
")",
"sym",
"=",
"self",
".",
"_cached_graph",
"[",
"1",
"]",
"sym",
".",
"save",
"(",
"'%s-symbol.json'",
"%",
"path",
")",
"arg_names",
"=",
"set",
"(",
"sym",
".",
"list_arguments",
"(",
")",
")",
"aux_names",
"=",
"set",
"(",
"sym",
".",
"list_auxiliary_states",
"(",
")",
")",
"arg_dict",
"=",
"{",
"}",
"for",
"name",
",",
"param",
"in",
"self",
".",
"collect_params",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"arg_names",
":",
"arg_dict",
"[",
"'arg:%s'",
"%",
"name",
"]",
"=",
"param",
".",
"_reduce",
"(",
")",
"else",
":",
"assert",
"name",
"in",
"aux_names",
"arg_dict",
"[",
"'aux:%s'",
"%",
"name",
"]",
"=",
"param",
".",
"_reduce",
"(",
")",
"ndarray",
".",
"save",
"(",
"'%s-%04d.params'",
"%",
"(",
"path",
",",
"epoch",
")",
",",
"arg_dict",
")"
] | Export HybridBlock to json format that can be loaded by
`SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.
.. note:: When there is only one input, it will have the name `data`. When there
are multiple inputs, they will be named `data0`, `data1`, etc.
Parameters
----------
path : str
Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
will be created, where xxxx is the 4-digit epoch number.
epoch : int
Epoch number of saved model. | [
"Export",
"HybridBlock",
"to",
"json",
"format",
"that",
"can",
"be",
"loaded",
"by",
"SymbolBlock",
".",
"imports",
"mxnet",
".",
"mod",
".",
"Module",
"or",
"the",
"C",
"++",
"interface",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L868-L899 |
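A minimal end-to-end sketch; the path 'model' is illustrative, and the forward pass is required so the cached graph exists:

import mxnet as mx
from mxnet import gluon

net = gluon.nn.HybridSequential()
net.add(gluon.nn.Dense(10))
net.initialize()
net.hybridize()
net(mx.nd.ones((1, 5)))        # one forward pass caches the graph
net.export('model', epoch=3)   # writes model-symbol.json and model-0003.params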
23,599 | apache/incubator-mxnet | python/mxnet/gluon/block.py | SymbolBlock.imports | def imports(symbol_file, input_names, param_file=None, ctx=None):
"""Import model previously saved by `HybridBlock.export` or
`Module.save_checkpoint` as a SymbolBlock for use in Gluon.
Parameters
----------
symbol_file : str
Path to symbol file.
input_names : list of str
List of input variable names
param_file : str, optional
Path to parameter file.
ctx : Context, default None
The context to initialize SymbolBlock on.
Returns
-------
SymbolBlock
SymbolBlock loaded from symbol and parameter files.
Examples
--------
>>> net1 = gluon.model_zoo.vision.resnet18_v1(
... prefix='resnet', pretrained=True)
>>> net1.hybridize()
>>> x = mx.nd.random.normal(shape=(1, 3, 32, 32))
>>> out1 = net1(x)
>>> net1.export('net1', epoch=1)
>>>
>>> net2 = gluon.SymbolBlock.imports(
... 'net1-symbol.json', ['data'], 'net1-0001.params')
>>> out2 = net2(x)
"""
sym = symbol.load(symbol_file)
if isinstance(input_names, str):
input_names = [input_names]
inputs = [symbol.var(i) for i in input_names]
ret = SymbolBlock(sym, inputs)
if param_file is not None:
ret.collect_params().load(param_file, ctx=ctx)
return ret | python | def imports(symbol_file, input_names, param_file=None, ctx=None):
"""Import model previously saved by `HybridBlock.export` or
`Module.save_checkpoint` as a SymbolBlock for use in Gluon.
Parameters
----------
symbol_file : str
Path to symbol file.
input_names : list of str
List of input variable names
param_file : str, optional
Path to parameter file.
ctx : Context, default None
The context to initialize SymbolBlock on.
Returns
-------
SymbolBlock
SymbolBlock loaded from symbol and parameter files.
Examples
--------
>>> net1 = gluon.model_zoo.vision.resnet18_v1(
... prefix='resnet', pretrained=True)
>>> net1.hybridize()
>>> x = mx.nd.random.normal(shape=(1, 3, 32, 32))
>>> out1 = net1(x)
>>> net1.export('net1', epoch=1)
>>>
>>> net2 = gluon.SymbolBlock.imports(
... 'net1-symbol.json', ['data'], 'net1-0001.params')
>>> out2 = net2(x)
"""
sym = symbol.load(symbol_file)
if isinstance(input_names, str):
input_names = [input_names]
inputs = [symbol.var(i) for i in input_names]
ret = SymbolBlock(sym, inputs)
if param_file is not None:
ret.collect_params().load(param_file, ctx=ctx)
return ret | [
"def",
"imports",
"(",
"symbol_file",
",",
"input_names",
",",
"param_file",
"=",
"None",
",",
"ctx",
"=",
"None",
")",
":",
"sym",
"=",
"symbol",
".",
"load",
"(",
"symbol_file",
")",
"if",
"isinstance",
"(",
"input_names",
",",
"str",
")",
":",
"input_names",
"=",
"[",
"input_names",
"]",
"inputs",
"=",
"[",
"symbol",
".",
"var",
"(",
"i",
")",
"for",
"i",
"in",
"input_names",
"]",
"ret",
"=",
"SymbolBlock",
"(",
"sym",
",",
"inputs",
")",
"if",
"param_file",
"is",
"not",
"None",
":",
"ret",
".",
"collect_params",
"(",
")",
".",
"load",
"(",
"param_file",
",",
"ctx",
"=",
"ctx",
")",
"return",
"ret"
] | Import model previously saved by `HybridBlock.export` or
`Module.save_checkpoint` as a SymbolBlock for use in Gluon.
Parameters
----------
symbol_file : str
Path to symbol file.
input_names : list of str
List of input variable names
param_file : str, optional
Path to parameter file.
ctx : Context, default None
The context to initialize SymbolBlock on.
Returns
-------
SymbolBlock
SymbolBlock loaded from symbol and parameter files.
Examples
--------
>>> net1 = gluon.model_zoo.vision.resnet18_v1(
... prefix='resnet', pretrained=True)
>>> net1.hybridize()
>>> x = mx.nd.random.normal(shape=(1, 3, 32, 32))
>>> out1 = net1(x)
>>> net1.export('net1', epoch=1)
>>>
>>> net2 = gluon.SymbolBlock.imports(
... 'net1-symbol.json', ['data'], 'net1-0001.params')
>>> out2 = net2(x) | [
"Import",
"model",
"previously",
"saved",
"by",
"HybridBlock",
".",
"export",
"or",
"Module",
".",
"save_checkpoint",
"as",
"a",
"SymbolBlock",
"for",
"use",
"in",
"Gluon",
"."
] | 1af29e9c060a4c7d60eeaacba32afdb9a7775ba7 | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L985-L1025 |