Dataset schema (column, type, observed sizes):

id                int32    values 0 to 252k
repo              string   lengths 7 to 55
path              string   lengths 4 to 127
func_name         string   lengths 1 to 88
original_string   string   lengths 75 to 19.8k
language          string   1 class
code              string   lengths 75 to 19.8k
code_tokens       list
docstring         string   lengths 3 to 17.3k
docstring_tokens  list
sha               string   lengths 40 to 40
url               string   lengths 87 to 242
24,100
apache/incubator-mxnet
python/mxnet/recordio.py
MXIndexedRecordIO.tell
def tell(self):
    """Returns the current position of write head.

    Examples
    --------
    >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
    >>> print(record.tell())
    0
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    ...     print(record.tell())
    16
    32
    48
    64
    80
    """
    assert self.writable
    pos = ctypes.c_size_t()
    check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos)))
    return pos.value
python
[ "def", "tell", "(", "self", ")", ":", "assert", "self", ".", "writable", "pos", "=", "ctypes", ".", "c_size_t", "(", ")", "check_call", "(", "_LIB", ".", "MXRecordIOWriterTell", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "pos", ")", ")", ")", "return", "pos", ".", "value" ]
Returns the current position of write head.

Examples
--------
>>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
>>> print(record.tell())
0
>>> for i in range(5):
...     record.write_idx(i, 'record_%d'%i)
...     print(record.tell())
16
32
48
64
80
[ "Returns", "the", "current", "position", "of", "write", "head", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L278-L298
24,101
apache/incubator-mxnet
python/mxnet/recordio.py
MXIndexedRecordIO.write_idx
def write_idx(self, idx, buf):
    """Inserts input record at given index.

    Examples
    --------
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    >>> record.close()

    Parameters
    ----------
    idx : int
        Index of the record.
    buf :
        Record to write.
    """
    key = self.key_type(idx)
    pos = self.tell()
    self.write(buf)
    self.fidx.write('%s\t%d\n'%(str(key), pos))
    self.idx[key] = pos
    self.keys.append(key)
python
[ "def", "write_idx", "(", "self", ",", "idx", ",", "buf", ")", ":", "key", "=", "self", ".", "key_type", "(", "idx", ")", "pos", "=", "self", ".", "tell", "(", ")", "self", ".", "write", "(", "buf", ")", "self", ".", "fidx", ".", "write", "(", "'%s\\t%d\\n'", "%", "(", "str", "(", "key", ")", ",", "pos", ")", ")", "self", ".", "idx", "[", "key", "]", "=", "pos", "self", ".", "keys", ".", "append", "(", "key", ")" ]
Inserts input record at given index.

Examples
--------
>>> for i in range(5):
...     record.write_idx(i, 'record_%d'%i)
>>> record.close()

Parameters
----------
idx : int
    Index of the record.
buf :
    Record to write.
[ "Inserts", "input", "record", "at", "given", "index", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L316-L337
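To make the indexed-record API above concrete, here is a minimal round-trip sketch; the file names are placeholders, and the bytes payloads assume Python 3 (the docstring's str payloads reflect Python 2):

import mxnet as mx

# write three indexed records; close() flushes the .idx file
record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')
for i in range(3):
    record.write_idx(i, b'record_%d' % i)
record.close()

# reopen in read mode and fetch a record by its index
reader = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r')
print(reader.read_idx(1))  # b'record_1'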
24,102
apache/incubator-mxnet
python/mxnet/notebook/callback.py
_add_new_columns
def _add_new_columns(dataframe, metrics):
    """Add new metrics as new columns to selected pandas dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        The selected dataframe to be modified.
    metrics : metric.EvalMetric
        New metrics to be added.
    """
    # TODO(leodirac): we don't really need to do this on every update. Optimize.
    new_columns = set(metrics.keys()) - set(dataframe.columns)
    for col in new_columns:
        dataframe[col] = None
python
[ "def", "_add_new_columns", "(", "dataframe", ",", "metrics", ")", ":", "#TODO(leodirac): we don't really need to do this on every update. Optimize", "new_columns", "=", "set", "(", "metrics", ".", "keys", "(", ")", ")", "-", "set", "(", "dataframe", ".", "columns", ")", "for", "col", "in", "new_columns", ":", "dataframe", "[", "col", "]", "=", "None" ]
Add new metrics as new columns to selected pandas dataframe.

Parameters
----------
dataframe : pandas.DataFrame
    The selected dataframe to be modified.
metrics : metric.EvalMetric
    New metrics to be added.
[ "Add", "new", "metrics", "as", "new", "columns", "to", "selected", "pandas", "dataframe", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/notebook/callback.py#L49-L62
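The column-growing logic above depends only on pandas; a self-contained sketch with invented metric names shows the set difference at work:

import pandas as pd

df = pd.DataFrame({'accuracy': [0.90]})
metrics = {'accuracy': 0.91, 'cross-entropy': 0.40}

# same set difference as _add_new_columns: only genuinely new keys become columns
new_columns = set(metrics.keys()) - set(df.columns)
for col in new_columns:
    df[col] = None  # adds 'cross-entropy'; 'accuracy' is left untouched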
24,103
apache/incubator-mxnet
python/mxnet/notebook/callback.py
PandasLogger.append_metrics
def append_metrics(self, metrics, df_name):
    """Append new metrics to the selected dataframe.

    Parameters
    ----------
    metrics : metric.EvalMetric
        New metrics to be added.
    df_name : str
        Name of the dataframe to be modified.
    """
    dataframe = self._dataframes[df_name]
    _add_new_columns(dataframe, metrics)
    dataframe.loc[len(dataframe)] = metrics
python
[ "def", "append_metrics", "(", "self", ",", "metrics", ",", "df_name", ")", ":", "dataframe", "=", "self", ".", "_dataframes", "[", "df_name", "]", "_add_new_columns", "(", "dataframe", ",", "metrics", ")", "dataframe", ".", "loc", "[", "len", "(", "dataframe", ")", "]", "=", "metrics" ]
Append new metrics to the selected dataframe.

Parameters
----------
metrics : metric.EvalMetric
    New metrics to be added.
df_name : str
    Name of the dataframe to be modified.
[ "Append", "new", "metrics", "to", "selected", "dataframes", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/notebook/callback.py#L130-L142
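The row append relies on a standard pandas idiom: assigning a dict to .loc[len(df)] adds one row whose values are matched to columns by key. A minimal sketch with invented metric names:

import pandas as pd

df = pd.DataFrame(columns=['accuracy', 'elapsed'])
df.loc[len(df)] = {'accuracy': 0.90, 'elapsed': 1.2}  # becomes row 0
df.loc[len(df)] = {'accuracy': 0.92, 'elapsed': 2.4}  # becomes row 1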
24,104
apache/incubator-mxnet
python/mxnet/notebook/callback.py
PandasLogger.train_cb
def train_cb(self, param):
    """Callback function for training."""
    if param.nbatch % self.frequent == 0:
        self._process_batch(param, 'train')
python
[ "def", "train_cb", "(", "self", ",", "param", ")", ":", "if", "param", ".", "nbatch", "%", "self", ".", "frequent", "==", "0", ":", "self", ".", "_process_batch", "(", "param", ",", "'train'", ")" ]
Callback function for training.
[ "Callback", "funtion", "for", "training", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/notebook/callback.py#L144-L148
24,105
apache/incubator-mxnet
python/mxnet/notebook/callback.py
PandasLogger.epoch_cb
def epoch_cb(self):
    """Callback function after each epoch. Records each epoch's time and
    appends it to the epoch dataframe.
    """
    metrics = {}
    metrics['elapsed'] = self.elapsed()
    now = datetime.datetime.now()
    metrics['epoch_time'] = now - self.last_epoch_time
    self.append_metrics(metrics, 'epoch')
    self.last_epoch_time = now
python
[ "def", "epoch_cb", "(", "self", ")", ":", "metrics", "=", "{", "}", "metrics", "[", "'elapsed'", "]", "=", "self", ".", "elapsed", "(", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "metrics", "[", "'epoch_time'", "]", "=", "now", "-", "self", ".", "last_epoch_time", "self", ".", "append_metrics", "(", "metrics", ",", "'epoch'", ")", "self", ".", "last_epoch_time", "=", "now" ]
Callback function after each epoch. Records each epoch's time and appends it to the epoch dataframe.
[ "Callback", "function", "after", "each", "epoch", ".", "Now", "it", "records", "each", "epoch", "time", "and", "append", "it", "to", "epoch", "dataframe", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/notebook/callback.py#L181-L190
24,106
apache/incubator-mxnet
python/mxnet/notebook/callback.py
LiveBokehChart._push_render
def _push_render(self):
    """Render the plot with bokeh.io and push to notebook."""
    bokeh.io.push_notebook(handle=self.handle)
    self.last_update = time.time()
python
[ "def", "_push_render", "(", "self", ")", ":", "bokeh", ".", "io", ".", "push_notebook", "(", "handle", "=", "self", ".", "handle", ")", "self", ".", "last_update", "=", "time", ".", "time", "(", ")" ]
Render the plot with bokeh.io and push to notebook.
[ "Render", "the", "plot", "with", "bokeh", ".", "io", "and", "push", "to", "notebook", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/notebook/callback.py#L243-L247
24,107
apache/incubator-mxnet
python/mxnet/contrib/text/vocab.py
Vocabulary.to_indices
def to_indices(self, tokens):
    """Converts tokens to indices according to the vocabulary.

    Parameters
    ----------
    tokens : str or list of strs
        A source token or tokens to be converted.

    Returns
    -------
    int or list of ints
        A token index or a list of token indices according to the vocabulary.
    """
    to_reduce = False
    if not isinstance(tokens, list):
        tokens = [tokens]
        to_reduce = True

    indices = [self.token_to_idx[token] if token in self.token_to_idx
               else C.UNKNOWN_IDX for token in tokens]

    return indices[0] if to_reduce else indices
python
[ "def", "to_indices", "(", "self", ",", "tokens", ")", ":", "to_reduce", "=", "False", "if", "not", "isinstance", "(", "tokens", ",", "list", ")", ":", "tokens", "=", "[", "tokens", "]", "to_reduce", "=", "True", "indices", "=", "[", "self", ".", "token_to_idx", "[", "token", "]", "if", "token", "in", "self", ".", "token_to_idx", "else", "C", ".", "UNKNOWN_IDX", "for", "token", "in", "tokens", "]", "return", "indices", "[", "0", "]", "if", "to_reduce", "else", "indices" ]
Converts tokens to indices according to the vocabulary.

Parameters
----------
tokens : str or list of strs
    A source token or tokens to be converted.

Returns
-------
int or list of ints
    A token index or a list of token indices according to the vocabulary.
[ "Converts", "tokens", "to", "indices", "according", "to", "the", "vocabulary", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/text/vocab.py#L162-L186
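A usage sketch for the lookup above, assuming the surrounding mxnet.contrib.text package (the helper names below come from that package, not from this function):

from mxnet.contrib import text

counter = text.utils.count_tokens_from_str('hello world hello')
vocab = text.vocab.Vocabulary(counter)

vocab.to_indices('hello')            # a single str yields a single int
vocab.to_indices(['hello', 'moon'])  # 'moon' is unseen, so it maps to the unknown index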
24,108
apache/incubator-mxnet
python/mxnet/io/io.py
_make_io_iterator
def _make_io_iterator(handle):
    """Create an io iterator by handle."""
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXDataIterGetIterInfo( \
        handle, ctypes.byref(name), ctypes.byref(desc), \
        ctypes.byref(num_args), \
        ctypes.byref(arg_names), \
        ctypes.byref(arg_types), \
        ctypes.byref(arg_descs)))
    iter_name = py_str(name.value)

    narg = int(num_args.value)
    param_str = _build_param_doc(
        [py_str(arg_names[i]) for i in range(narg)],
        [py_str(arg_types[i]) for i in range(narg)],
        [py_str(arg_descs[i]) for i in range(narg)])

    doc_str = ('%s\n\n' +
               '%s\n' +
               'Returns\n' +
               '-------\n' +
               'MXDataIter\n' +
               '    The result iterator.')
    doc_str = doc_str % (desc.value, param_str)

    def creator(*args, **kwargs):
        """Create an iterator.
        The parameters listed below can be passed in as keyword arguments.

        Parameters
        ----------
        name : string, required.
            Name of the resulting data iterator.

        Returns
        -------
        dataiter : Dataiter
            The resulting data iterator.
        """
        param_keys = []
        param_vals = []

        for k, val in kwargs.items():
            param_keys.append(k)
            param_vals.append(str(val))
        # create atomic symbol
        param_keys = c_str_array(param_keys)
        param_vals = c_str_array(param_vals)
        iter_handle = DataIterHandle()
        check_call(_LIB.MXDataIterCreateIter(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(iter_handle)))

        if len(args):
            raise TypeError('%s can only accept keyword arguments' % iter_name)

        return MXDataIter(iter_handle, **kwargs)

    creator.__name__ = iter_name
    creator.__doc__ = doc_str
    return creator
python
[ "def", "_make_io_iterator", "(", "handle", ")", ":", "name", "=", "ctypes", ".", "c_char_p", "(", ")", "desc", "=", "ctypes", ".", "c_char_p", "(", ")", "num_args", "=", "mx_uint", "(", ")", "arg_names", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "arg_types", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "arg_descs", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "check_call", "(", "_LIB", ".", "MXDataIterGetIterInfo", "(", "handle", ",", "ctypes", ".", "byref", "(", "name", ")", ",", "ctypes", ".", "byref", "(", "desc", ")", ",", "ctypes", ".", "byref", "(", "num_args", ")", ",", "ctypes", ".", "byref", "(", "arg_names", ")", ",", "ctypes", ".", "byref", "(", "arg_types", ")", ",", "ctypes", ".", "byref", "(", "arg_descs", ")", ")", ")", "iter_name", "=", "py_str", "(", "name", ".", "value", ")", "narg", "=", "int", "(", "num_args", ".", "value", ")", "param_str", "=", "_build_param_doc", "(", "[", "py_str", "(", "arg_names", "[", "i", "]", ")", "for", "i", "in", "range", "(", "narg", ")", "]", ",", "[", "py_str", "(", "arg_types", "[", "i", "]", ")", "for", "i", "in", "range", "(", "narg", ")", "]", ",", "[", "py_str", "(", "arg_descs", "[", "i", "]", ")", "for", "i", "in", "range", "(", "narg", ")", "]", ")", "doc_str", "=", "(", "'%s\\n\\n'", "+", "'%s\\n'", "+", "'Returns\\n'", "+", "'-------\\n'", "+", "'MXDataIter\\n'", "+", "' The result iterator.'", ")", "doc_str", "=", "doc_str", "%", "(", "desc", ".", "value", ",", "param_str", ")", "def", "creator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Create an iterator.\n The parameters listed below can be passed in as keyword arguments.\n\n Parameters\n ----------\n name : string, required.\n Name of the resulting data iterator.\n\n Returns\n -------\n dataiter: Dataiter\n The resulting data iterator.\n \"\"\"", "param_keys", "=", "[", "]", "param_vals", "=", "[", "]", "for", "k", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "param_keys", ".", "append", "(", "k", ")", "param_vals", ".", "append", "(", "str", "(", "val", ")", ")", "# create atomic symbol", "param_keys", "=", "c_str_array", "(", "param_keys", ")", "param_vals", "=", "c_str_array", "(", "param_vals", ")", "iter_handle", "=", "DataIterHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXDataIterCreateIter", "(", "handle", ",", "mx_uint", "(", "len", "(", "param_keys", ")", ")", ",", "param_keys", ",", "param_vals", ",", "ctypes", ".", "byref", "(", "iter_handle", ")", ")", ")", "if", "len", "(", "args", ")", ":", "raise", "TypeError", "(", "'%s can only accept keyword arguments'", "%", "iter_name", ")", "return", "MXDataIter", "(", "iter_handle", ",", "*", "*", "kwargs", ")", "creator", ".", "__name__", "=", "iter_name", "creator", ".", "__doc__", "=", "doc_str", "return", "creator" ]
Create an io iterator by handle.
[ "Create", "an", "io", "iterator", "by", "handle", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L899-L967
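The creators returned by this factory are installed as module-level entry points (see _init_io_module below) and, per the TypeError guard, accept keyword arguments only. A hedged sketch using CSVIter, one of the registered iterators, with a placeholder file path:

import mxnet as mx

# positional arguments would raise the TypeError above
data_iter = mx.io.CSVIter(data_csv='data.csv', data_shape=(4,), batch_size=2)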
24,109
apache/incubator-mxnet
python/mxnet/io/io.py
_init_io_module
def _init_io_module():
    """List and add all the data iterators to current module."""
    plist = ctypes.POINTER(ctypes.c_void_p)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
    module_obj = sys.modules[__name__]
    for i in range(size.value):
        hdl = ctypes.c_void_p(plist[i])
        dataiter = _make_io_iterator(hdl)
        setattr(module_obj, dataiter.__name__, dataiter)
python
[ "def", "_init_io_module", "(", ")", ":", "plist", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_void_p", ")", "(", ")", "size", "=", "ctypes", ".", "c_uint", "(", ")", "check_call", "(", "_LIB", ".", "MXListDataIters", "(", "ctypes", ".", "byref", "(", "size", ")", ",", "ctypes", ".", "byref", "(", "plist", ")", ")", ")", "module_obj", "=", "sys", ".", "modules", "[", "__name__", "]", "for", "i", "in", "range", "(", "size", ".", "value", ")", ":", "hdl", "=", "ctypes", ".", "c_void_p", "(", "plist", "[", "i", "]", ")", "dataiter", "=", "_make_io_iterator", "(", "hdl", ")", "setattr", "(", "module_obj", ",", "dataiter", ".", "__name__", ",", "dataiter", ")" ]
List and add all the data iterators to current module.
[ "List", "and", "add", "all", "the", "data", "iterators", "to", "current", "module", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L969-L978
24,110
apache/incubator-mxnet
python/mxnet/io/io.py
DataDesc.get_list
def get_list(shapes, types):
    """Get DataDesc list from attribute lists.

    Parameters
    ----------
    shapes : list of (name_, shape_) tuples
    types : list of (name_, np.dtype) tuples
    """
    if types is not None:
        type_dict = dict(types)
        return [DataDesc(x[0], x[1], type_dict[x[0]])
                for x in shapes]
    else:
        return [DataDesc(x[0], x[1])
                for x in shapes]
python
[ "def", "get_list", "(", "shapes", ",", "types", ")", ":", "if", "types", "is", "not", "None", ":", "type_dict", "=", "dict", "(", "types", ")", "return", "[", "DataDesc", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ",", "type_dict", "[", "x", "[", "0", "]", "]", ")", "for", "x", "in", "shapes", "]", "else", ":", "return", "[", "DataDesc", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "for", "x", "in", "shapes", "]" ]
Get DataDesc list from attribute lists.

Parameters
----------
shapes : list of (name_, shape_) tuples
types : list of (name_, np.dtype) tuples
[ "Get", "DataDesc", "list", "from", "attribute", "lists", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L100-L112
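A short sketch of calling the static helper above with name/shape and name/dtype pairs (the names and shapes are invented):

import numpy as np
from mxnet.io import DataDesc

shapes = [('data', (32, 3, 224, 224)), ('softmax_label', (32,))]
types = [('data', np.float32), ('softmax_label', np.float32)]
descs = DataDesc.get_list(shapes, types)  # one DataDesc per entry in shapes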
24,111
apache/incubator-mxnet
python/mxnet/io/io.py
DataIter.next
def next(self):
    """Get next data batch from iterator.

    Returns
    -------
    DataBatch
        The data of next batch.

    Raises
    ------
    StopIteration
        If the end of the data is reached.
    """
    if self.iter_next():
        return DataBatch(data=self.getdata(), label=self.getlabel(), \
                         pad=self.getpad(), index=self.getindex())
    else:
        raise StopIteration
python
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "iter_next", "(", ")", ":", "return", "DataBatch", "(", "data", "=", "self", ".", "getdata", "(", ")", ",", "label", "=", "self", ".", "getlabel", "(", ")", ",", "pad", "=", "self", ".", "getpad", "(", ")", ",", "index", "=", "self", ".", "getindex", "(", ")", ")", "else", ":", "raise", "StopIteration" ]
Get next data batch from iterator.

Returns
-------
DataBatch
    The data of next batch.

Raises
------
StopIteration
    If the end of the data is reached.
[ "Get", "next", "data", "batch", "from", "iterator", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L208-L225
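Since next() raises StopIteration at the end of the data, the iterator can be driven by a plain for loop. A sketch over an in-memory NDArrayIter:

import numpy as np
import mxnet as mx

data = np.arange(40).reshape((10, 4)).astype('float32')
data_iter = mx.io.NDArrayIter(data, batch_size=4, last_batch_handle='pad')
for batch in data_iter:  # the loop calls next() until StopIteration
    print(batch.data[0].shape, 'pad =', batch.pad)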
24,112
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter.hard_reset
def hard_reset(self):
    """Ignore roll over data and set to start."""
    if self.shuffle:
        self._shuffle_data()
    self.cursor = -self.batch_size
    self._cache_data = None
    self._cache_label = None
python
[ "def", "hard_reset", "(", "self", ")", ":", "if", "self", ".", "shuffle", ":", "self", ".", "_shuffle_data", "(", ")", "self", ".", "cursor", "=", "-", "self", ".", "batch_size", "self", ".", "_cache_data", "=", "None", "self", ".", "_cache_label", "=", "None" ]
Ignore roll over data and set to start.
[ "Ignore", "roll", "over", "data", "and", "set", "to", "start", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L650-L656
24,113
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter.iter_next
def iter_next(self):
    """Increments the cursor by batch_size for the next batch and checks
    whether the current cursor exceeds the number of data points."""
    self.cursor += self.batch_size
    return self.cursor < self.num_data
python
[ "def", "iter_next", "(", "self", ")", ":", "self", ".", "cursor", "+=", "self", ".", "batch_size", "return", "self", ".", "cursor", "<", "self", ".", "num_data" ]
Increments the cursor by batch_size for the next batch and checks whether the current cursor exceeds the number of data points.
[ "Increments", "the", "coursor", "by", "batch_size", "for", "next", "batch", "and", "check", "current", "cursor", "if", "it", "exceed", "the", "number", "of", "data", "points", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L670-L674
24,114
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter._getdata
def _getdata(self, data_source, start=None, end=None):
    """Load data from underlying arrays."""
    assert start is not None or end is not None, 'should at least specify start or end'
    start = start if start is not None else 0
    if end is None:
        end = data_source[0][1].shape[0] if data_source else 0
    s = slice(start, end)
    return [
        x[1][s]
        if isinstance(x[1], (np.ndarray, NDArray)) else
        # h5py (only supports indices in increasing order)
        array(x[1][sorted(self.idx[s])][[
            list(self.idx[s]).index(i)
            for i in sorted(self.idx[s])
        ]]) for x in data_source
    ]
python
[ "def", "_getdata", "(", "self", ",", "data_source", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "assert", "start", "is", "not", "None", "or", "end", "is", "not", "None", ",", "'should at least specify start or end'", "start", "=", "start", "if", "start", "is", "not", "None", "else", "0", "if", "end", "is", "None", ":", "end", "=", "data_source", "[", "0", "]", "[", "1", "]", ".", "shape", "[", "0", "]", "if", "data_source", "else", "0", "s", "=", "slice", "(", "start", ",", "end", ")", "return", "[", "x", "[", "1", "]", "[", "s", "]", "if", "isinstance", "(", "x", "[", "1", "]", ",", "(", "np", ".", "ndarray", ",", "NDArray", ")", ")", "else", "# h5py (only supports indices in increasing order)", "array", "(", "x", "[", "1", "]", "[", "sorted", "(", "self", ".", "idx", "[", "s", "]", ")", "]", "[", "[", "list", "(", "self", ".", "idx", "[", "s", "]", ")", ".", "index", "(", "i", ")", "for", "i", "in", "sorted", "(", "self", ".", "idx", "[", "s", "]", ")", "]", "]", ")", "for", "x", "in", "data_source", "]" ]
Load data from underlying arrays.
[ "Load", "data", "from", "underlying", "arrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L691-L706
24,115
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter._concat
def _concat(self, first_data, second_data):
    """Helper function to concat two NDArrays."""
    assert len(first_data) == len(
        second_data), 'data source should contain the same size'
    if first_data and second_data:
        return [
            concat(
                first_data[x],
                second_data[x],
                dim=0
            ) for x in range(len(first_data))
        ]
    elif (not first_data) and (not second_data):
        return []
    else:
        return [
            first_data[0] if first_data else second_data[0]
            for x in range(len(first_data))
        ]
python
[ "def", "_concat", "(", "self", ",", "first_data", ",", "second_data", ")", ":", "assert", "len", "(", "first_data", ")", "==", "len", "(", "second_data", ")", ",", "'data source should contain the same size'", "if", "first_data", "and", "second_data", ":", "return", "[", "concat", "(", "first_data", "[", "x", "]", ",", "second_data", "[", "x", "]", ",", "dim", "=", "0", ")", "for", "x", "in", "range", "(", "len", "(", "first_data", ")", ")", "]", "elif", "(", "not", "first_data", ")", "and", "(", "not", "second_data", ")", ":", "return", "[", "]", "else", ":", "return", "[", "first_data", "[", "0", "]", "if", "first_data", "else", "second_data", "[", "0", "]", "for", "x", "in", "range", "(", "len", "(", "first_data", ")", ")", "]" ]
Helper function to concat two NDArrays.
[ "Helper", "function", "to", "concat", "two", "NDArrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L708-L726
24,116
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter._batchify
def _batchify(self, data_source):
    """Load data from underlying arrays, internal use only."""
    assert self.cursor < self.num_data, 'DataIter needs reset.'
    # first batch of next epoch with 'roll_over'
    if self.last_batch_handle == 'roll_over' and \
        -self.batch_size < self.cursor < 0:
        assert self._cache_data is not None or self._cache_label is not None, \
            'next epoch should have cached data'
        cache_data = self._cache_data if self._cache_data is not None else self._cache_label
        second_data = self._getdata(
            data_source, end=self.cursor + self.batch_size)
        if self._cache_data is not None:
            self._cache_data = None
        else:
            self._cache_label = None
        return self._concat(cache_data, second_data)
    # last batch with 'pad'
    elif self.last_batch_handle == 'pad' and \
        self.cursor + self.batch_size > self.num_data:
        pad = self.batch_size - self.num_data + self.cursor
        first_data = self._getdata(data_source, start=self.cursor)
        second_data = self._getdata(data_source, end=pad)
        return self._concat(first_data, second_data)
    # normal case
    else:
        if self.cursor + self.batch_size < self.num_data:
            end_idx = self.cursor + self.batch_size
        # get incomplete last batch
        else:
            end_idx = self.num_data
        return self._getdata(data_source, self.cursor, end_idx)
python
[ "def", "_batchify", "(", "self", ",", "data_source", ")", ":", "assert", "self", ".", "cursor", "<", "self", ".", "num_data", ",", "'DataIter needs reset.'", "# first batch of next epoch with 'roll_over'", "if", "self", ".", "last_batch_handle", "==", "'roll_over'", "and", "-", "self", ".", "batch_size", "<", "self", ".", "cursor", "<", "0", ":", "assert", "self", ".", "_cache_data", "is", "not", "None", "or", "self", ".", "_cache_label", "is", "not", "None", ",", "'next epoch should have cached data'", "cache_data", "=", "self", ".", "_cache_data", "if", "self", ".", "_cache_data", "is", "not", "None", "else", "self", ".", "_cache_label", "second_data", "=", "self", ".", "_getdata", "(", "data_source", ",", "end", "=", "self", ".", "cursor", "+", "self", ".", "batch_size", ")", "if", "self", ".", "_cache_data", "is", "not", "None", ":", "self", ".", "_cache_data", "=", "None", "else", ":", "self", ".", "_cache_label", "=", "None", "return", "self", ".", "_concat", "(", "cache_data", ",", "second_data", ")", "# last batch with 'pad'", "elif", "self", ".", "last_batch_handle", "==", "'pad'", "and", "self", ".", "cursor", "+", "self", ".", "batch_size", ">", "self", ".", "num_data", ":", "pad", "=", "self", ".", "batch_size", "-", "self", ".", "num_data", "+", "self", ".", "cursor", "first_data", "=", "self", ".", "_getdata", "(", "data_source", ",", "start", "=", "self", ".", "cursor", ")", "second_data", "=", "self", ".", "_getdata", "(", "data_source", ",", "end", "=", "pad", ")", "return", "self", ".", "_concat", "(", "first_data", ",", "second_data", ")", "# normal case", "else", ":", "if", "self", ".", "cursor", "+", "self", ".", "batch_size", "<", "self", ".", "num_data", ":", "end_idx", "=", "self", ".", "cursor", "+", "self", ".", "batch_size", "# get incomplete last batch", "else", ":", "end_idx", "=", "self", ".", "num_data", "return", "self", ".", "_getdata", "(", "data_source", ",", "self", ".", "cursor", ",", "end_idx", ")" ]
Load data from underlying arrays, internal use only.
[ "Load", "data", "from", "underlying", "arrays", "internal", "use", "only", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L728-L758
24,117
apache/incubator-mxnet
python/mxnet/io/io.py
NDArrayIter.getpad
def getpad(self):
    """Get pad value of DataBatch."""
    if self.last_batch_handle == 'pad' and \
        self.cursor + self.batch_size > self.num_data:
        return self.cursor + self.batch_size - self.num_data
    # check the first batch
    elif self.last_batch_handle == 'roll_over' and \
        -self.batch_size < self.cursor < 0:
        return -self.cursor
    else:
        return 0
python
[ "def", "getpad", "(", "self", ")", ":", "if", "self", ".", "last_batch_handle", "==", "'pad'", "and", "self", ".", "cursor", "+", "self", ".", "batch_size", ">", "self", ".", "num_data", ":", "return", "self", ".", "cursor", "+", "self", ".", "batch_size", "-", "self", ".", "num_data", "# check the first batch", "elif", "self", ".", "last_batch_handle", "==", "'roll_over'", "and", "-", "self", ".", "batch_size", "<", "self", ".", "cursor", "<", "0", ":", "return", "-", "self", ".", "cursor", "else", ":", "return", "0" ]
Get pad value of DataBatch.
[ "Get", "pad", "value", "of", "DataBatch", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L768-L778
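To make the 'pad' branch concrete, a worked instance of the arithmetic (the sizes are invented):

# num_data = 10, batch_size = 4, last_batch_handle = 'pad'
# the final batch starts at cursor = 8, and 8 + 4 > 10, so:
cursor, batch_size, num_data = 8, 4, 10
pad = cursor + batch_size - num_data
print(pad)  # 2 wrapped-around samples the caller should discard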
24,118
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_quantize_params
def _quantize_params(qsym, params, th_dict):
    """Given a quantized symbol and a dict of params that have not been quantized,
    generate quantized params. Currently only supports quantizing the arg_params
    with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
    that are excluded from being quantized, their corresponding params will
    not be quantized, but saved together with quantized params of the symbols that
    have been quantized.

    Parameters
    ----------
    qsym : Symbol
        Quantized symbol from FP32 symbol.
    params : dict of str->NDArray
    th_dict : dict of min/max pairs of layers' output
    """
    inputs_name = qsym.list_arguments()
    quantized_params = {}
    for name in inputs_name:
        if name.endswith(('weight_quantize', 'bias_quantize')):
            original_name = name[:-len('_quantize')]
            param = params[original_name]
            val, vmin, vmax = ndarray.contrib.quantize(data=param,
                                                       min_range=ndarray.min(param),
                                                       max_range=ndarray.max(param),
                                                       out_type='int8')
            quantized_params[name] = val
            quantized_params[name+'_min'] = vmin
            quantized_params[name+'_max'] = vmax
        elif name in params:
            quantized_params[name] = params[name]
        elif name.endswith(('_min')):
            output = name[: - len('_min')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][0]])
        elif name.endswith(('_max')):
            output = name[: - len('_min')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][1]])
    return quantized_params
python
[ "def", "_quantize_params", "(", "qsym", ",", "params", ",", "th_dict", ")", ":", "inputs_name", "=", "qsym", ".", "list_arguments", "(", ")", "quantized_params", "=", "{", "}", "for", "name", "in", "inputs_name", ":", "if", "name", ".", "endswith", "(", "(", "'weight_quantize'", ",", "'bias_quantize'", ")", ")", ":", "original_name", "=", "name", "[", ":", "-", "len", "(", "'_quantize'", ")", "]", "param", "=", "params", "[", "original_name", "]", "val", ",", "vmin", ",", "vmax", "=", "ndarray", ".", "contrib", ".", "quantize", "(", "data", "=", "param", ",", "min_range", "=", "ndarray", ".", "min", "(", "param", ")", ",", "max_range", "=", "ndarray", ".", "max", "(", "param", ")", ",", "out_type", "=", "'int8'", ")", "quantized_params", "[", "name", "]", "=", "val", "quantized_params", "[", "name", "+", "'_min'", "]", "=", "vmin", "quantized_params", "[", "name", "+", "'_max'", "]", "=", "vmax", "elif", "name", "in", "params", ":", "quantized_params", "[", "name", "]", "=", "params", "[", "name", "]", "elif", "name", ".", "endswith", "(", "(", "'_min'", ")", ")", ":", "output", "=", "name", "[", ":", "-", "len", "(", "'_min'", ")", "]", "if", "output", "in", "th_dict", ":", "quantized_params", "[", "name", "]", "=", "ndarray", ".", "array", "(", "[", "th_dict", "[", "output", "]", "[", "0", "]", "]", ")", "elif", "name", ".", "endswith", "(", "(", "'_max'", ")", ")", ":", "output", "=", "name", "[", ":", "-", "len", "(", "'_min'", ")", "]", "if", "output", "in", "th_dict", ":", "quantized_params", "[", "name", "]", "=", "ndarray", ".", "array", "(", "[", "th_dict", "[", "output", "]", "[", "1", "]", "]", ")", "return", "quantized_params" ]
Given a quantized symbol and a dict of params that have not been quantized, generate quantized params. Currently only supports quantizing the arg_params with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols that are excluded from being quantized, their corresponding params will not be quantized, but saved together with quantized params of the symbols that have been quantized.

Parameters
----------
qsym : Symbol
    Quantized symbol from FP32 symbol.
params : dict of str->NDArray
th_dict : dict of min/max pairs of layers' output
[ "Given", "a", "quantized", "symbol", "and", "a", "dict", "of", "params", "that", "have", "not", "been", "quantized", "generate", "quantized", "params", ".", "Currently", "only", "supports", "quantizing", "the", "arg_params", "with", "names", "of", "weight", "or", "bias", "not", "aux_params", ".", "If", "qsym", "contains", "symbols", "that", "are", "excluded", "from", "being", "quantized", "their", "corresponding", "params", "will", "not", "be", "quantized", "but", "saved", "together", "with", "quantized", "params", "of", "the", "symbols", "that", "have", "been", "quantized", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L43-L81
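The per-parameter quantization call can be exercised on its own; a sketch mirroring the ndarray.contrib.quantize usage in the body above:

import mxnet as mx

w = mx.nd.random.uniform(-1, 1, shape=(3, 3))
q, vmin, vmax = mx.nd.contrib.quantize(data=w,
                                       min_range=mx.nd.min(w),
                                       max_range=mx.nd.max(w),
                                       out_type='int8')
print(q.dtype, vmin.asscalar(), vmax.asscalar())  # int8 values plus the float range used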
24,119
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_quantize_symbol
def _quantize_symbol(sym, excluded_symbols=None, offline_params=None, quantized_dtype='int8'):
    """Given a symbol object representing a neural network of data type FP32,
    quantize it into an INT8 network.

    Parameters
    ----------
    sym : Symbol
        FP32 neural network symbol.
    excluded_symbols : list of strings
        A list of strings representing the names of the symbols that users want
        to exclude from being quantized.
    offline_params : list of strs
        Names of the parameters that users want to quantize offline. It's always
        recommended to quantize parameters offline so that quantizing parameters
        during the inference can be avoided.
    quantized_dtype : str
        The quantized destination type for input data.
    """
    num_excluded_symbols = 0
    if excluded_symbols is not None:
        assert isinstance(excluded_symbols, list)
        num_excluded_symbols = len(excluded_symbols)
    else:
        excluded_symbols = []

    num_offline = 0
    offline = []
    if offline_params is not None:
        num_offline = len(offline_params)
        for k in offline_params:
            offline.append(c_str(k))

    out = SymbolHandle()
    check_call(_LIB.MXQuantizeSymbol(sym.handle,
                                     ctypes.byref(out),
                                     mx_uint(num_excluded_symbols),
                                     c_str_array(excluded_symbols),
                                     mx_uint(num_offline),
                                     c_array(ctypes.c_char_p, offline),
                                     c_str(quantized_dtype),
                                     ctypes.c_bool(True)))
    return Symbol(out)
python
[ "def", "_quantize_symbol", "(", "sym", ",", "excluded_symbols", "=", "None", ",", "offline_params", "=", "None", ",", "quantized_dtype", "=", "'int8'", ")", ":", "num_excluded_symbols", "=", "0", "if", "excluded_symbols", "is", "not", "None", ":", "assert", "isinstance", "(", "excluded_symbols", ",", "list", ")", "num_excluded_symbols", "=", "len", "(", "excluded_symbols", ")", "else", ":", "excluded_symbols", "=", "[", "]", "num_offline", "=", "0", "offline", "=", "[", "]", "if", "offline_params", "is", "not", "None", ":", "num_offline", "=", "len", "(", "offline_params", ")", "for", "k", "in", "offline_params", ":", "offline", ".", "append", "(", "c_str", "(", "k", ")", ")", "out", "=", "SymbolHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXQuantizeSymbol", "(", "sym", ".", "handle", ",", "ctypes", ".", "byref", "(", "out", ")", ",", "mx_uint", "(", "num_excluded_symbols", ")", ",", "c_str_array", "(", "excluded_symbols", ")", ",", "mx_uint", "(", "num_offline", ")", ",", "c_array", "(", "ctypes", ".", "c_char_p", ",", "offline", ")", ",", "c_str", "(", "quantized_dtype", ")", ",", "ctypes", ".", "c_bool", "(", "True", ")", ")", ")", "return", "Symbol", "(", "out", ")" ]
Given a symbol object representing a neural network of data type FP32, quantize it into an INT8 network.

Parameters
----------
sym : Symbol
    FP32 neural network symbol.
excluded_symbols : list of strings
    A list of strings representing the names of the symbols that users want to exclude from being quantized.
offline_params : list of strs
    Names of the parameters that users want to quantize offline. It's always recommended to quantize parameters offline so that quantizing parameters during the inference can be avoided.
quantized_dtype : str
    The quantized destination type for input data.
[ "Given", "a", "symbol", "object", "representing", "a", "neural", "network", "of", "data", "type", "FP32", "quantize", "it", "into", "a", "INT8", "network", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L83-L124
24,120
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_calibrate_quantized_sym
def _calibrate_quantized_sym(qsym, th_dict):
    """Given a dictionary containing the thresholds for quantizing the layers,
    set the thresholds into the quantized symbol as the params of requantize operators.
    """
    if th_dict is None or len(th_dict) == 0:
        return qsym
    num_layer_outputs = len(th_dict)
    layer_output_names = []
    min_vals = []
    max_vals = []
    for k, v in th_dict.items():
        layer_output_names.append(k)
        min_vals.append(v[0])
        max_vals.append(v[1])

    calibrated_sym = SymbolHandle()
    check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,
                                                     mx_uint(num_layer_outputs),
                                                     c_str_array(layer_output_names),
                                                     c_array(ctypes.c_float, min_vals),
                                                     c_array(ctypes.c_float, max_vals),
                                                     ctypes.byref(calibrated_sym)))
    return Symbol(calibrated_sym)
python
[ "def", "_calibrate_quantized_sym", "(", "qsym", ",", "th_dict", ")", ":", "if", "th_dict", "is", "None", "or", "len", "(", "th_dict", ")", "==", "0", ":", "return", "qsym", "num_layer_outputs", "=", "len", "(", "th_dict", ")", "layer_output_names", "=", "[", "]", "min_vals", "=", "[", "]", "max_vals", "=", "[", "]", "for", "k", ",", "v", "in", "th_dict", ".", "items", "(", ")", ":", "layer_output_names", ".", "append", "(", "k", ")", "min_vals", ".", "append", "(", "v", "[", "0", "]", ")", "max_vals", ".", "append", "(", "v", "[", "1", "]", ")", "calibrated_sym", "=", "SymbolHandle", "(", ")", "check_call", "(", "_LIB", ".", "MXSetCalibTableToQuantizedSymbol", "(", "qsym", ".", "handle", ",", "mx_uint", "(", "num_layer_outputs", ")", ",", "c_str_array", "(", "layer_output_names", ")", ",", "c_array", "(", "ctypes", ".", "c_float", ",", "min_vals", ")", ",", "c_array", "(", "ctypes", ".", "c_float", ",", "max_vals", ")", ",", "ctypes", ".", "byref", "(", "calibrated_sym", ")", ")", ")", "return", "Symbol", "(", "calibrated_sym", ")" ]
Given a dictionary containing the thresholds for quantizing the layers, set the thresholds into the quantized symbol as the params of requantize operators.
[ "Given", "a", "dictionary", "containing", "the", "thresholds", "for", "quantizing", "the", "layers", "set", "the", "thresholds", "into", "the", "quantized", "symbol", "as", "the", "params", "of", "requantize", "operators", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L179-L201
24,121
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_collect_layer_output_min_max
def _collect_layer_output_min_max(mod, data, include_layer=None,
                                  max_num_examples=None, logger=None):
    """Collect min and max values from layer outputs and save them in
    a dictionary mapped by layer names.
    """
    collector = _LayerOutputMinMaxCollector(include_layer=include_layer, logger=logger)
    num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
    return collector.min_max_dict, num_examples
python
[ "def", "_collect_layer_output_min_max", "(", "mod", ",", "data", ",", "include_layer", "=", "None", ",", "max_num_examples", "=", "None", ",", "logger", "=", "None", ")", ":", "collector", "=", "_LayerOutputMinMaxCollector", "(", "include_layer", "=", "include_layer", ",", "logger", "=", "logger", ")", "num_examples", "=", "_collect_layer_statistics", "(", "mod", ",", "data", ",", "collector", ",", "max_num_examples", ",", "logger", ")", "return", "collector", ".", "min_max_dict", ",", "num_examples" ]
Collect min and max values from layer outputs and save them in a dictionary mapped by layer names.
[ "Collect", "min", "and", "max", "values", "from", "layer", "outputs", "and", "save", "them", "in", "a", "dictionary", "mapped", "by", "layer", "names", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L223-L230
24,122
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_collect_layer_outputs
def _collect_layer_outputs(mod, data, include_layer=None, max_num_examples=None, logger=None):
    """Collect layer outputs and save them in a dictionary mapped by layer names."""
    collector = _LayerOutputCollector(include_layer=include_layer, logger=logger)
    num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
    return collector.nd_dict, num_examples
python
[ "def", "_collect_layer_outputs", "(", "mod", ",", "data", ",", "include_layer", "=", "None", ",", "max_num_examples", "=", "None", ",", "logger", "=", "None", ")", ":", "collector", "=", "_LayerOutputCollector", "(", "include_layer", "=", "include_layer", ",", "logger", "=", "logger", ")", "num_examples", "=", "_collect_layer_statistics", "(", "mod", ",", "data", ",", "collector", ",", "max_num_examples", ",", "logger", ")", "return", "collector", ".", "nd_dict", ",", "num_examples" ]
Collect layer outputs and save them in a dictionary mapped by layer names.
[ "Collect", "layer", "outputs", "and", "save", "them", "in", "a", "dictionary", "mapped", "by", "layer", "names", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L233-L237
24,123
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_get_optimal_thresholds
def _get_optimal_thresholds(nd_dict, quantized_dtype, num_bins=8001, num_quantized_bins=255, logger=None):
    """Given an ndarray dict, find the optimal threshold for quantizing each of its values."""
    if stats is None:
        raise ImportError('scipy.stats is required for running entropy mode of calculating'
                          ' the optimal thresholds for quantizing FP32 ndarrays into int8.'
                          ' Please check if the scipy python bindings are installed.')
    assert isinstance(nd_dict, dict)
    if logger is not None:
        logger.info('Calculating optimal thresholds for quantization using KL divergence'
                    ' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
    th_dict = {}
    # copy nd_dict keys since the keys() only returns a view in python3
    layer_names = list(nd_dict.keys())
    for name in layer_names:
        assert name in nd_dict
        min_val, max_val, min_divergence, opt_th = \
            _get_optimal_threshold(nd_dict[name], quantized_dtype,
                                   num_bins=num_bins,
                                   num_quantized_bins=num_quantized_bins)
        del nd_dict[name]  # release the memory of ndarray
        if min_val < 0:
            th_dict[name] = (-opt_th, opt_th)
        else:
            th_dict[name] = (0, opt_th)
        if logger is not None:
            logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
                        % (name, min_val, max_val, min_divergence, opt_th))
    return th_dict
python
[ "def", "_get_optimal_thresholds", "(", "nd_dict", ",", "quantized_dtype", ",", "num_bins", "=", "8001", ",", "num_quantized_bins", "=", "255", ",", "logger", "=", "None", ")", ":", "if", "stats", "is", "None", ":", "raise", "ImportError", "(", "'scipy.stats is required for running entropy mode of calculating'", "' the optimal thresholds for quantizing FP32 ndarrays into int8.'", "' Please check if the scipy python bindings are installed.'", ")", "assert", "isinstance", "(", "nd_dict", ",", "dict", ")", "if", "logger", "is", "not", "None", ":", "logger", ".", "info", "(", "'Calculating optimal thresholds for quantization using KL divergence'", "' with num_bins=%d and num_quantized_bins=%d'", "%", "(", "num_bins", ",", "num_quantized_bins", ")", ")", "th_dict", "=", "{", "}", "# copy nd_dict keys since the keys() only returns a view in python3", "layer_names", "=", "list", "(", "nd_dict", ".", "keys", "(", ")", ")", "for", "name", "in", "layer_names", ":", "assert", "name", "in", "nd_dict", "min_val", ",", "max_val", ",", "min_divergence", ",", "opt_th", "=", "_get_optimal_threshold", "(", "nd_dict", "[", "name", "]", ",", "quantized_dtype", ",", "num_bins", "=", "num_bins", ",", "num_quantized_bins", "=", "num_quantized_bins", ")", "del", "nd_dict", "[", "name", "]", "# release the memory of ndarray", "if", "min_val", "<", "0", ":", "th_dict", "[", "name", "]", "=", "(", "-", "opt_th", ",", "opt_th", ")", "else", ":", "th_dict", "[", "name", "]", "=", "(", "0", ",", "opt_th", ")", "if", "logger", "is", "not", "None", ":", "logger", ".", "info", "(", "'layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'", "%", "(", "name", ",", "min_val", ",", "max_val", ",", "min_divergence", ",", "opt_th", ")", ")", "return", "th_dict" ]
Given an NDArray dict, find the optimal threshold for quantizing each NDArray in the dict.
[ "Given", "an", "NDArray", "dict", "find", "the", "optimal", "threshold", "for", "quantizing", "each", "NDArray", "in", "the", "dict", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L355-L381
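The returned th_dict maps each layer name to a calibration range: symmetric (-opt_th, opt_th) when the collected outputs go negative, and one-sided (0, opt_th) otherwise (e.g. after a ReLU). A rough sketch of how such (min, max) pairs are typically turned into int8 scales downstream; the values and the 127 divisor are standard int8 conventions, not taken from this file:

th_dict = {'conv0_output': (-2.5, 2.5), 'relu0_output': (0, 6.0)}  # made-up values
for name, (lo, hi) in sorted(th_dict.items()):
    scale = 127.0 / max(abs(lo), abs(hi))  # map the clipped float range onto [-127, 127]
    print('%s: clip to [%g, %g], scale %.4f' % (name, lo, hi, scale))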
24,124
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_load_sym
def _load_sym(sym, logger=logging):
    """Given a str as a path to the symbol .json file or a symbol, returns a Symbol object."""
    if isinstance(sym, str):  # sym is a symbol file path
        cur_path = os.path.dirname(os.path.realpath(__file__))
        symbol_file_path = os.path.join(cur_path, sym)
        logger.info('Loading symbol from file %s' % symbol_file_path)
        return sym_load(symbol_file_path)
    elif isinstance(sym, Symbol):
        return sym
    else:
        raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
                         ' while received type %s' % str(type(sym)))
python
def _load_sym(sym, logger=logging):
    """Given a str as a path to the symbol .json file or a symbol, returns a Symbol object."""
    if isinstance(sym, str):  # sym is a symbol file path
        cur_path = os.path.dirname(os.path.realpath(__file__))
        symbol_file_path = os.path.join(cur_path, sym)
        logger.info('Loading symbol from file %s' % symbol_file_path)
        return sym_load(symbol_file_path)
    elif isinstance(sym, Symbol):
        return sym
    else:
        raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
                         ' while received type %s' % str(type(sym)))
[ "def", "_load_sym", "(", "sym", ",", "logger", "=", "logging", ")", ":", "if", "isinstance", "(", "sym", ",", "str", ")", ":", "# sym is a symbol file path", "cur_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "symbol_file_path", "=", "os", ".", "path", ".", "join", "(", "cur_path", ",", "sym", ")", "logger", ".", "info", "(", "'Loading symbol from file %s'", "%", "symbol_file_path", ")", "return", "sym_load", "(", "symbol_file_path", ")", "elif", "isinstance", "(", "sym", ",", "Symbol", ")", ":", "return", "sym", "else", ":", "raise", "ValueError", "(", "'_load_sym only accepts Symbol or path to the symbol file,'", "' while received type %s'", "%", "str", "(", "type", "(", "sym", ")", ")", ")" ]
Given a str as a path to the symbol .json file or a symbol, returns a Symbol object.
[ "Given", "a", "str", "as", "a", "path", "to", "the", "symbol", ".", "json", "file", "or", "a", "symbol", "returns", "a", "Symbol", "object", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L384-L395
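One subtlety worth calling out: the path is joined against the directory of quantization.py itself, not the caller's working directory. Passing an absolute path still works, because os.path.join discards everything before an absolute component (the /opt and /models paths below are illustrative):

import os
# os.path.join drops earlier components when a later one is absolute,
# so absolute symbol paths bypass the module-relative join above.
print(os.path.join('/opt/mxnet/python/mxnet/contrib', 'model-symbol.json'))
# -> /opt/mxnet/python/mxnet/contrib/model-symbol.json  (relative path: resolved against the module dir)
print(os.path.join('/opt/mxnet/python/mxnet/contrib', '/models/model-symbol.json'))
# -> /models/model-symbol.json  (absolute path: the module dir is ignored)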
24,125
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_load_params
def _load_params(params, logger=logging):
    """Given a str as a path to the .params file or a pair of params,
    returns two dictionaries representing arg_params and aux_params.
    """
    if isinstance(params, str):
        cur_path = os.path.dirname(os.path.realpath(__file__))
        param_file_path = os.path.join(cur_path, params)
        logger.info('Loading params from file %s' % param_file_path)
        save_dict = nd_load(param_file_path)
        arg_params = {}
        aux_params = {}
        for k, v in save_dict.items():
            tp, name = k.split(':', 1)
            if tp == 'arg':
                arg_params[name] = v
            if tp == 'aux':
                aux_params[name] = v
        return arg_params, aux_params
    elif isinstance(params, (tuple, list)) and len(params) == 2:
        return params[0], params[1]
    else:
        raise ValueError('Unsupported params provided. Must be either a path to the param file or'
                         ' a pair of dictionaries representing arg_params and aux_params')
python
def _load_params(params, logger=logging):
    """Given a str as a path to the .params file or a pair of params,
    returns two dictionaries representing arg_params and aux_params.
    """
    if isinstance(params, str):
        cur_path = os.path.dirname(os.path.realpath(__file__))
        param_file_path = os.path.join(cur_path, params)
        logger.info('Loading params from file %s' % param_file_path)
        save_dict = nd_load(param_file_path)
        arg_params = {}
        aux_params = {}
        for k, v in save_dict.items():
            tp, name = k.split(':', 1)
            if tp == 'arg':
                arg_params[name] = v
            if tp == 'aux':
                aux_params[name] = v
        return arg_params, aux_params
    elif isinstance(params, (tuple, list)) and len(params) == 2:
        return params[0], params[1]
    else:
        raise ValueError('Unsupported params provided. Must be either a path to the param file or'
                         ' a pair of dictionaries representing arg_params and aux_params')
[ "def", "_load_params", "(", "params", ",", "logger", "=", "logging", ")", ":", "if", "isinstance", "(", "params", ",", "str", ")", ":", "cur_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "param_file_path", "=", "os", ".", "path", ".", "join", "(", "cur_path", ",", "params", ")", "logger", ".", "info", "(", "'Loading params from file %s'", "%", "param_file_path", ")", "save_dict", "=", "nd_load", "(", "param_file_path", ")", "arg_params", "=", "{", "}", "aux_params", "=", "{", "}", "for", "k", ",", "v", "in", "save_dict", ".", "items", "(", ")", ":", "tp", ",", "name", "=", "k", ".", "split", "(", "':'", ",", "1", ")", "if", "tp", "==", "'arg'", ":", "arg_params", "[", "name", "]", "=", "v", "if", "tp", "==", "'aux'", ":", "aux_params", "[", "name", "]", "=", "v", "return", "arg_params", ",", "aux_params", "elif", "isinstance", "(", "params", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "params", ")", "==", "2", ":", "return", "params", "[", "0", "]", ",", "params", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "'Unsupported params provided. Must be either a path to the param file or'", "' a pair of dictionaries representing arg_params and aux_params'", ")" ]
Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params.
[ "Given", "a", "str", "as", "a", "path", "to", "the", ".", "params", "file", "or", "a", "pair", "of", "params", "returns", "two", "dictionaries", "representing", "arg_params", "and", "aux_params", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L398-L420
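The split on ':' relies on MXNet's .params naming convention, in which every key is prefixed with the parameter class. A tiny sketch of that convention with made-up layer names (values would be NDArrays in practice):

save_dict = {'arg:conv0_weight': 'W', 'arg:fc1_bias': 'b',
             'aux:bn0_moving_mean': 'm'}
arg_params = {k.split(':', 1)[1]: v for k, v in save_dict.items() if k.startswith('arg:')}
aux_params = {k.split(':', 1)[1]: v for k, v in save_dict.items() if k.startswith('aux:')}
print(sorted(arg_params))  # ['conv0_weight', 'fc1_bias']
print(sorted(aux_params))  # ['bn0_moving_mean']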
24,126
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_LayerOutputCollector.collect
def collect(self, name, arr):
        """Callback function for collecting layer output NDArrays."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False).copyto(cpu())
        if self.logger is not None:
            self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
        if name in self.nd_dict:
            self.nd_dict[name].append(arr)
        else:
            self.nd_dict[name] = [arr]
python
def collect(self, name, arr):
        """Callback function for collecting layer output NDArrays."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False).copyto(cpu())
        if self.logger is not None:
            self.logger.info("Collecting layer %s output of shape %s" % (name, arr.shape))
        if name in self.nd_dict:
            self.nd_dict[name].append(arr)
        else:
            self.nd_dict[name] = [arr]
[ "def", "collect", "(", "self", ",", "name", ",", "arr", ")", ":", "name", "=", "py_str", "(", "name", ")", "if", "self", ".", "include_layer", "is", "not", "None", "and", "not", "self", ".", "include_layer", "(", "name", ")", ":", "return", "handle", "=", "ctypes", ".", "cast", "(", "arr", ",", "NDArrayHandle", ")", "arr", "=", "NDArray", "(", "handle", ",", "writable", "=", "False", ")", ".", "copyto", "(", "cpu", "(", ")", ")", "if", "self", ".", "logger", "is", "not", "None", ":", "self", ".", "logger", ".", "info", "(", "\"Collecting layer %s output of shape %s\"", "%", "(", "name", ",", "arr", ".", "shape", ")", ")", "if", "name", "in", "self", ".", "nd_dict", ":", "self", ".", "nd_dict", "[", "name", "]", ".", "append", "(", "arr", ")", "else", ":", "self", ".", "nd_dict", "[", "name", "]", "=", "[", "arr", "]" ]
Callback function for collecting layer output NDArrays.
[ "Callback", "function", "for", "collecting", "layer", "output", "NDArrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L137-L149
24,127
apache/incubator-mxnet
python/mxnet/contrib/quantization.py
_LayerOutputMinMaxCollector.collect
def collect(self, name, arr):
        """Callback function for collecting min and max values from an NDArray."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False)
        min_range = ndarray.min(arr).asscalar()
        max_range = ndarray.max(arr).asscalar()
        if name in self.min_max_dict:
            cur_min_max = self.min_max_dict[name]
            self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                       max(cur_min_max[1], max_range))
        else:
            self.min_max_dict[name] = (min_range, max_range)
        if self.logger is not None:
            self.logger.info("Collecting layer %s min_range=%f, max_range=%f"
                             % (name, min_range, max_range))
python
def collect(self, name, arr):
        """Callback function for collecting min and max values from an NDArray."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False)
        min_range = ndarray.min(arr).asscalar()
        max_range = ndarray.max(arr).asscalar()
        if name in self.min_max_dict:
            cur_min_max = self.min_max_dict[name]
            self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                       max(cur_min_max[1], max_range))
        else:
            self.min_max_dict[name] = (min_range, max_range)
        if self.logger is not None:
            self.logger.info("Collecting layer %s min_range=%f, max_range=%f"
                             % (name, min_range, max_range))
[ "def", "collect", "(", "self", ",", "name", ",", "arr", ")", ":", "name", "=", "py_str", "(", "name", ")", "if", "self", ".", "include_layer", "is", "not", "None", "and", "not", "self", ".", "include_layer", "(", "name", ")", ":", "return", "handle", "=", "ctypes", ".", "cast", "(", "arr", ",", "NDArrayHandle", ")", "arr", "=", "NDArray", "(", "handle", ",", "writable", "=", "False", ")", "min_range", "=", "ndarray", ".", "min", "(", "arr", ")", ".", "asscalar", "(", ")", "max_range", "=", "ndarray", ".", "max", "(", "arr", ")", ".", "asscalar", "(", ")", "if", "name", "in", "self", ".", "min_max_dict", ":", "cur_min_max", "=", "self", ".", "min_max_dict", "[", "name", "]", "self", ".", "min_max_dict", "[", "name", "]", "=", "(", "min", "(", "cur_min_max", "[", "0", "]", ",", "min_range", ")", ",", "max", "(", "cur_min_max", "[", "1", "]", ",", "max_range", ")", ")", "else", ":", "self", ".", "min_max_dict", "[", "name", "]", "=", "(", "min_range", ",", "max_range", ")", "if", "self", ".", "logger", "is", "not", "None", ":", "self", ".", "logger", ".", "info", "(", "\"Collecting layer %s min_range=%f, max_range=%f\"", "%", "(", "name", ",", "min_range", ",", "max_range", ")", ")" ]
Callback function for collecting min and max values from an NDArray.
[ "Callback", "function", "for", "collecting", "min", "and", "max", "values", "from", "an", "NDArray", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/quantization.py#L160-L177
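Unlike _LayerOutputCollector, which copies every batch's outputs to CPU memory, this collector keeps only two scalars per layer, so its memory use does not grow with the number of calibration batches. The merge is the usual running-extrema update; a minimal sketch with plain Python floats (the layer name and batch ranges are illustrative):

min_max_dict = {}
for name, batch_min, batch_max in [('conv0', -1.2, 3.0), ('conv0', -0.4, 4.1)]:
    if name in min_max_dict:
        cur = min_max_dict[name]
        min_max_dict[name] = (min(cur[0], batch_min), max(cur[1], batch_max))
    else:
        min_max_dict[name] = (batch_min, batch_max)
print(min_max_dict)  # {'conv0': (-1.2, 4.1)}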
24,128
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
encoder
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''The encoder is a CNN which takes a 32x32 image as input and
    generates the 100 dimensional shape embedding as a sample from a normal distribution
    using the predicted mean and variance
    '''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias)
    ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps)
    eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2)

    e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias)
    ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps)
    eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2)

    e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias)
    ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps)
    eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2)

    e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias)
    ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps)
    eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2)
    eact4 = mx.sym.Flatten(eact4)

    z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu")
    z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv")
    z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv), mx.symbol.random_normal(loc=0, scale=1, shape=(batch_size, z_dim)))
    return z_mu, z_lv, z
python
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''The encoder is a CNN which takes a 32x32 image as input and
    generates the 100 dimensional shape embedding as a sample from a normal distribution
    using the predicted mean and variance
    '''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias)
    ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps)
    eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2)

    e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias)
    ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps)
    eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2)

    e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias)
    ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps)
    eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2)

    e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias)
    ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps)
    eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2)
    eact4 = mx.sym.Flatten(eact4)

    z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu")
    z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv")
    z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv), mx.symbol.random_normal(loc=0, scale=1, shape=(batch_size, z_dim)))
    return z_mu, z_lv, z
[ "def", "encoder", "(", "nef", ",", "z_dim", ",", "batch_size", ",", "no_bias", "=", "True", ",", "fix_gamma", "=", "True", ",", "eps", "=", "1e-5", "+", "1e-12", ")", ":", "BatchNorm", "=", "mx", ".", "sym", ".", "BatchNorm", "data", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "e1", "=", "mx", ".", "sym", ".", "Convolution", "(", "data", ",", "name", "=", "'enc1'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "nef", ",", "no_bias", "=", "no_bias", ")", "ebn1", "=", "BatchNorm", "(", "e1", ",", "name", "=", "'encbn1'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "eact1", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "ebn1", ",", "name", "=", "'encact1'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "e2", "=", "mx", ".", "sym", ".", "Convolution", "(", "eact1", ",", "name", "=", "'enc2'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "nef", "*", "2", ",", "no_bias", "=", "no_bias", ")", "ebn2", "=", "BatchNorm", "(", "e2", ",", "name", "=", "'encbn2'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "eact2", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "ebn2", ",", "name", "=", "'encact2'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "e3", "=", "mx", ".", "sym", ".", "Convolution", "(", "eact2", ",", "name", "=", "'enc3'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "nef", "*", "4", ",", "no_bias", "=", "no_bias", ")", "ebn3", "=", "BatchNorm", "(", "e3", ",", "name", "=", "'encbn3'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "eact3", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "ebn3", ",", "name", "=", "'encact3'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "e4", "=", "mx", ".", "sym", ".", "Convolution", "(", "eact3", ",", "name", "=", "'enc4'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "nef", "*", "8", ",", "no_bias", "=", "no_bias", ")", "ebn4", "=", "BatchNorm", "(", "e4", ",", "name", "=", "'encbn4'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "eact4", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "ebn4", ",", "name", "=", "'encact4'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "eact4", "=", "mx", ".", "sym", ".", "Flatten", "(", "eact4", ")", "z_mu", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "eact4", ",", "num_hidden", "=", "z_dim", ",", "name", "=", "\"enc_mu\"", ")", "z_lv", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "eact4", ",", "num_hidden", "=", "z_dim", ",", "name", "=", "\"enc_lv\"", ")", "z", "=", "z_mu", "+", "mx", ".", "symbol", ".", "broadcast_mul", "(", "mx", ".", "symbol", ".", "exp", "(", "0.5", "*", "z_lv", ")", ",", "mx", ".", "symbol", ".", "random_normal", "(", "loc", "=", "0", ",", "scale", "=", "1", ",", "shape", "=", "(", "batch_size", ",", "z_dim", ")", ")", ")", "return", "z_mu", ",", "z_lv", ",", "z" ]
The encoder is a CNN which takes a 32x32 image as input and generates the 100 dimensional shape embedding as a sample from a normal distribution using the predicted mean and variance
[ "The", "encoder", "is", "a", "CNN", "which", "takes", "a", "32x32", "image", "as", "input", "and", "generates", "the", "100", "dimensional", "shape", "embedding", "as", "a", "sample", "from", "a", "normal", "distribution", "using", "the", "predicted", "mean", "and", "variance" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L54-L86
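The last line of encoder is the reparameterization trick: instead of sampling z ~ N(mu, sigma^2) directly, it draws eps ~ N(0, 1) and computes z = mu + exp(0.5 * log_var) * eps, which keeps the sampling step differentiable with respect to mu and log_var. A NumPy sketch of the same identity, with illustrative values:

import numpy as np

rng = np.random.default_rng(0)
mu, log_var = 1.5, np.log(0.25)       # illustrative mean and log-variance
eps = rng.standard_normal(100000)
z = mu + np.exp(0.5 * log_var) * eps  # same form as the symbol graph above
print(z.mean(), z.var())              # ~1.5 and ~0.25, i.e. N(mu, exp(log_var))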
24,129
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
generator
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'):
    '''The generator is a CNN which takes the 100 dimensional embedding as input
    and reconstructs the input image given to the encoder
    '''
    BatchNorm = mx.sym.BatchNorm

    rand = mx.sym.Variable('rand')

    rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1))

    g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2), target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias)
    gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps)
    gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu")

    g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2), target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias)
    gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps)
    gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu')

    g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias)
    gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps)
    gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu')

    g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias)
    gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps)
    gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu')

    g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias)
    gout = mx.sym.Activation(g5, name='genact5', act_type=activation)
    return gout
python
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'):
    '''The generator is a CNN which takes the 100 dimensional embedding as input
    and reconstructs the input image given to the encoder
    '''
    BatchNorm = mx.sym.BatchNorm

    rand = mx.sym.Variable('rand')

    rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1))

    g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2), target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias)
    gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps)
    gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu")

    g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2), target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias)
    gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps)
    gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu')

    g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias)
    gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps)
    gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu')

    g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias)
    gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps)
    gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu')

    g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias)
    gout = mx.sym.Activation(g5, name='genact5', act_type=activation)
    return gout
[ "def", "generator", "(", "ngf", ",", "nc", ",", "no_bias", "=", "True", ",", "fix_gamma", "=", "True", ",", "eps", "=", "1e-5", "+", "1e-12", ",", "z_dim", "=", "100", ",", "activation", "=", "'sigmoid'", ")", ":", "BatchNorm", "=", "mx", ".", "sym", ".", "BatchNorm", "rand", "=", "mx", ".", "sym", ".", "Variable", "(", "'rand'", ")", "rand", "=", "mx", ".", "sym", ".", "Reshape", "(", "rand", ",", "shape", "=", "(", "-", "1", ",", "z_dim", ",", "1", ",", "1", ")", ")", "g1", "=", "mx", ".", "sym", ".", "Deconvolution", "(", "rand", ",", "name", "=", "'gen1'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "target_shape", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "ngf", "*", "8", ",", "no_bias", "=", "no_bias", ")", "gbn1", "=", "BatchNorm", "(", "g1", ",", "name", "=", "'genbn1'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "gact1", "=", "mx", ".", "sym", ".", "Activation", "(", "gbn1", ",", "name", "=", "\"genact1\"", ",", "act_type", "=", "\"relu\"", ")", "g2", "=", "mx", ".", "sym", ".", "Deconvolution", "(", "gact1", ",", "name", "=", "'gen2'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "target_shape", "=", "(", "4", ",", "4", ")", ",", "num_filter", "=", "ngf", "*", "4", ",", "no_bias", "=", "no_bias", ")", "gbn2", "=", "BatchNorm", "(", "g2", ",", "name", "=", "'genbn2'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "gact2", "=", "mx", ".", "sym", ".", "Activation", "(", "gbn2", ",", "name", "=", "'genact2'", ",", "act_type", "=", "'relu'", ")", "g3", "=", "mx", ".", "sym", ".", "Deconvolution", "(", "gact2", ",", "name", "=", "'gen3'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "target_shape", "=", "(", "8", ",", "8", ")", ",", "num_filter", "=", "ngf", "*", "2", ",", "no_bias", "=", "no_bias", ")", "gbn3", "=", "BatchNorm", "(", "g3", ",", "name", "=", "'genbn3'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "gact3", "=", "mx", ".", "sym", ".", "Activation", "(", "gbn3", ",", "name", "=", "'genact3'", ",", "act_type", "=", "'relu'", ")", "g4", "=", "mx", ".", "sym", ".", "Deconvolution", "(", "gact3", ",", "name", "=", "'gen4'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "target_shape", "=", "(", "16", ",", "16", ")", ",", "num_filter", "=", "ngf", ",", "no_bias", "=", "no_bias", ")", "gbn4", "=", "BatchNorm", "(", "g4", ",", "name", "=", "'genbn4'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "gact4", "=", "mx", ".", "sym", ".", "Activation", "(", "gbn4", ",", "name", "=", "'genact4'", ",", "act_type", "=", "'relu'", ")", "g5", "=", "mx", ".", "sym", ".", "Deconvolution", "(", "gact4", ",", "name", "=", "'gen5'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "target_shape", "=", "(", "32", ",", "32", ")", ",", "num_filter", "=", "nc", ",", "no_bias", "=", "no_bias", ")", "gout", "=", "mx", ".", "sym", ".", "Activation", "(", "g5", ",", "name", "=", "'genact5'", ",", "act_type", "=", "activation", ")", "return", "gout" ]
The generator is a CNN which takes the 100 dimensional embedding as input and reconstructs the input image given to the encoder
[ "The", "generator", "is", "a", "CNN", "which", "takes", "the", "100", "dimensional", "embedding", "as", "input", "and", "reconstructs", "the", "input", "image", "given", "to", "the", "encoder" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L88-L116
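Each Deconvolution doubles the spatial size, and target_shape pins the exact output (1 -> 2 -> 4 -> 8 -> 16 -> 32) so MXNet infers the padding/adjustment instead of the caller deriving it from (in - 1) * stride - 2 * pad + kernel. A quick check of that transposed-convolution arithmetic, assuming a pad of 2 (the pad and adjustment are illustrative, since target_shape hides them):

# Transposed-convolution output size: (in - 1) * stride - 2 * pad + kernel (+ adj)
# With kernel=5, stride=2, pad=2 the plain formula gives 2*in - 1, so an output
# adjustment of 1 is needed to land on each power of two.
def deconv_out(in_size, kernel=5, stride=2, pad=2, adj=1):
    return (in_size - 1) * stride - 2 * pad + kernel + adj

size = 1
for _ in range(5):
    size = deconv_out(size)
    print(size)  # 2, 4, 8, 16, 32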
24,130
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
discriminator1
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''First part of the discriminator which takes a 32x32 image as input
    and outputs a convolutional feature map; this is required to calculate the layer loss'''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias)
    dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)

    d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias)
    dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
    dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)

    d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias)
    dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
    dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)

    return dact3
python
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''First part of the discriminator which takes a 32x32 image as input
    and outputs a convolutional feature map; this is required to calculate the layer loss'''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias)
    dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)

    d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias)
    dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
    dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)

    d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias)
    dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
    dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)

    return dact3
[ "def", "discriminator1", "(", "ndf", ",", "no_bias", "=", "True", ",", "fix_gamma", "=", "True", ",", "eps", "=", "1e-5", "+", "1e-12", ")", ":", "BatchNorm", "=", "mx", ".", "sym", ".", "BatchNorm", "data", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "d1", "=", "mx", ".", "sym", ".", "Convolution", "(", "data", ",", "name", "=", "'d1'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "ndf", ",", "no_bias", "=", "no_bias", ")", "dact1", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "d1", ",", "name", "=", "'dact1'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "d2", "=", "mx", ".", "sym", ".", "Convolution", "(", "dact1", ",", "name", "=", "'d2'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "ndf", "*", "2", ",", "no_bias", "=", "no_bias", ")", "dbn2", "=", "BatchNorm", "(", "d2", ",", "name", "=", "'dbn2'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "dact2", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "dbn2", ",", "name", "=", "'dact2'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "d3", "=", "mx", ".", "sym", ".", "Convolution", "(", "dact2", ",", "name", "=", "'d3'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "ndf", "*", "4", ",", "no_bias", "=", "no_bias", ")", "dbn3", "=", "BatchNorm", "(", "d3", ",", "name", "=", "'dbn3'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "dact3", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "dbn3", ",", "name", "=", "'dact3'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "return", "dact3" ]
First part of the discriminator which takes a 32x32 image as input and outputs a convolutional feature map; this is required to calculate the layer loss
[ "First", "part", "of", "the", "discriminator", "which", "takes", "a", "32x32", "image", "as", "input", "and", "outputs", "a", "convolutional", "feature", "map", "this", "is", "required", "to", "calculate", "the", "layer", "loss" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L118-L137
24,131
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
discriminator2
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''Second part of the discriminator which takes a 256x8x8 feature map as input
    and generates the loss based on whether the input image was a real one or fake one'''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    label = mx.sym.Variable('label')

    d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
    dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
    dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)

    h = mx.sym.Flatten(dact4)

    d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")

    dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')

    return dloss
python
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
    '''Second part of the discriminator which takes a 256x8x8 feature map as input
    and generates the loss based on whether the input image was a real one or fake one'''
    BatchNorm = mx.sym.BatchNorm

    data = mx.sym.Variable('data')

    label = mx.sym.Variable('label')

    d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
    dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
    dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)

    h = mx.sym.Flatten(dact4)

    d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")

    dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')

    return dloss
[ "def", "discriminator2", "(", "ndf", ",", "no_bias", "=", "True", ",", "fix_gamma", "=", "True", ",", "eps", "=", "1e-5", "+", "1e-12", ")", ":", "BatchNorm", "=", "mx", ".", "sym", ".", "BatchNorm", "data", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "label", "=", "mx", ".", "sym", ".", "Variable", "(", "'label'", ")", "d4", "=", "mx", ".", "sym", ".", "Convolution", "(", "data", ",", "name", "=", "'d4'", ",", "kernel", "=", "(", "5", ",", "5", ")", ",", "stride", "=", "(", "2", ",", "2", ")", ",", "pad", "=", "(", "2", ",", "2", ")", ",", "num_filter", "=", "ndf", "*", "8", ",", "no_bias", "=", "no_bias", ")", "dbn4", "=", "BatchNorm", "(", "d4", ",", "name", "=", "'dbn4'", ",", "fix_gamma", "=", "fix_gamma", ",", "eps", "=", "eps", ")", "dact4", "=", "mx", ".", "sym", ".", "LeakyReLU", "(", "dbn4", ",", "name", "=", "'dact4'", ",", "act_type", "=", "'leaky'", ",", "slope", "=", "0.2", ")", "h", "=", "mx", ".", "sym", ".", "Flatten", "(", "dact4", ")", "d5", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "h", ",", "num_hidden", "=", "1", ",", "name", "=", "\"d5\"", ")", "dloss", "=", "mx", ".", "sym", ".", "LogisticRegressionOutput", "(", "data", "=", "d5", ",", "label", "=", "label", ",", "name", "=", "'dloss'", ")", "return", "dloss" ]
Second part of the discriminator which takes a 256x8x8 feature map as input and generates the loss based on whether the input image was a real one or fake one
[ "Second", "part", "of", "the", "discriminator", "which", "takes", "a", "256x8x8", "feature", "map", "as", "input", "and", "generates", "the", "loss", "based", "on", "whether", "the", "input", "image", "was", "a", "real", "one", "or", "fake", "one" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L139-L159
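Taken together, an image flows through discriminator1 into a feature map (reused for the layer loss) and then through discriminator2 into a sigmoid real/fake score. One caveat: for a 32x32 input, three stride-2, pad-2, kernel-5 convolutions appear to yield a 4x4 map, so with ndf=64 the input to discriminator2 would be 256x4x4 rather than the 256x8x8 the docstring states; the code itself is agnostic to the spatial size. A small inspection sketch (assumes mxnet is importable as mx and the two functions above are in scope):

feat = discriminator1(ndf=64)     # 'data' (image) -> (ndf*4)-channel feature map
head = discriminator2(ndf=64)     # 'data' (feature map) + 'label' -> real/fake loss
print(feat.list_outputs())        # ['dact3_output']
print(head.list_arguments()[:3])  # the feature-map 'data' input comes first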
24,132
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
GaussianLogDensity
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
    '''GaussianLogDensity loss calculation for layer wise loss
    '''
    c = mx.sym.ones_like(log_var)*2.0 * 3.1416
    c = mx.symbol.log(c)
    var = mx.sym.exp(log_var)
    x_mu2 = mx.symbol.square(x - mu)   # [Issue] not sure the dim works or not?
    x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
    log_prob = -0.5 * (c + log_var + x_mu2_over_var)
    log_prob = mx.symbol.sum(log_prob, axis=1, name=name)   # keep_dims=True,
    return log_prob
python
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
    '''GaussianLogDensity loss calculation for layer wise loss
    '''
    c = mx.sym.ones_like(log_var)*2.0 * 3.1416
    c = mx.symbol.log(c)
    var = mx.sym.exp(log_var)
    x_mu2 = mx.symbol.square(x - mu)   # [Issue] not sure the dim works or not?
    x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
    log_prob = -0.5 * (c + log_var + x_mu2_over_var)
    log_prob = mx.symbol.sum(log_prob, axis=1, name=name)   # keep_dims=True,
    return log_prob
[ "def", "GaussianLogDensity", "(", "x", ",", "mu", ",", "log_var", ",", "name", "=", "'GaussianLogDensity'", ",", "EPSILON", "=", "1e-6", ")", ":", "c", "=", "mx", ".", "sym", ".", "ones_like", "(", "log_var", ")", "*", "2.0", "*", "3.1416", "c", "=", "mx", ".", "symbol", ".", "log", "(", "c", ")", "var", "=", "mx", ".", "sym", ".", "exp", "(", "log_var", ")", "x_mu2", "=", "mx", ".", "symbol", ".", "square", "(", "x", "-", "mu", ")", "# [Issue] not sure the dim works or not?", "x_mu2_over_var", "=", "mx", ".", "symbol", ".", "broadcast_div", "(", "x_mu2", ",", "var", "+", "EPSILON", ")", "log_prob", "=", "-", "0.5", "*", "(", "c", "+", "log_var", "+", "x_mu2_over_var", ")", "log_prob", "=", "mx", ".", "symbol", ".", "sum", "(", "log_prob", ",", "axis", "=", "1", ",", "name", "=", "name", ")", "# keep_dims=True,", "return", "log_prob" ]
GaussianLogDensity loss calculation for layer wise loss
[ "GaussianLogDensity", "loss", "calculation", "for", "layer", "wise", "loss" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L161-L171
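This implements the diagonal Gaussian log-density summed over the feature axis: log N(x; mu, sigma^2) = -1/2 * (log(2*pi) + log(sigma^2) + (x - mu)^2 / sigma^2), with the literal 3.1416 standing in for pi and EPSILON guarding the division. A NumPy cross-check of the formula against scipy, using illustrative scalars:

import numpy as np
from scipy.stats import norm

x, mu, log_var = 0.7, 0.2, np.log(0.5)  # illustrative values
ours = -0.5 * (np.log(2 * np.pi) + log_var + (x - mu) ** 2 / np.exp(log_var))
print(ours, norm.logpdf(x, loc=mu, scale=np.exp(0.5 * log_var)))  # the two agree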
24,133
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
DiscriminatorLayerLoss
def DiscriminatorLayerLoss():
    '''Calculate the discriminator layer loss
    '''
    data = mx.sym.Variable('data')

    label = mx.sym.Variable('label')

    data = mx.sym.Flatten(data)
    label = mx.sym.Flatten(label)

    label = mx.sym.BlockGrad(label)

    zeros = mx.sym.zeros_like(data)

    output = -GaussianLogDensity(label, data, zeros)

    dloss = mx.symbol.MakeLoss(mx.symbol.mean(output), name='lloss')

    return dloss
python
def DiscriminatorLayerLoss():
    '''Calculate the discriminator layer loss
    '''
    data = mx.sym.Variable('data')

    label = mx.sym.Variable('label')

    data = mx.sym.Flatten(data)
    label = mx.sym.Flatten(label)

    label = mx.sym.BlockGrad(label)

    zeros = mx.sym.zeros_like(data)

    output = -GaussianLogDensity(label, data, zeros)

    dloss = mx.symbol.MakeLoss(mx.symbol.mean(output), name='lloss')

    return dloss
[ "def", "DiscriminatorLayerLoss", "(", ")", ":", "data", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "label", "=", "mx", ".", "sym", ".", "Variable", "(", "'label'", ")", "data", "=", "mx", ".", "sym", ".", "Flatten", "(", "data", ")", "label", "=", "mx", ".", "sym", ".", "Flatten", "(", "label", ")", "label", "=", "mx", ".", "sym", ".", "BlockGrad", "(", "label", ")", "zeros", "=", "mx", ".", "sym", ".", "zeros_like", "(", "data", ")", "output", "=", "-", "GaussianLogDensity", "(", "label", ",", "data", ",", "zeros", ")", "dloss", "=", "mx", ".", "symbol", ".", "MakeLoss", "(", "mx", ".", "symbol", ".", "mean", "(", "output", ")", ",", "name", "=", "'lloss'", ")", "return", "dloss" ]
Calculate the discriminator layer loss
[ "Calculate", "the", "discriminator", "layer", "loss" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L173-L192
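With log_var fixed to the zeros symbol, the negative Gaussian log-density reduces to squared error up to scale and an additive constant, so the layer loss is essentially a squared-error match between discriminator feature maps; BlockGrad stops gradients from flowing into the target side. A NumPy illustration of the reduction, with made-up feature values:

import numpy as np

label, data = np.array([1.0, 2.0]), np.array([0.5, 2.5])
neg_logp = 0.5 * (np.log(2 * np.pi) + (label - data) ** 2)  # log_var = 0 case
sq_err = 0.5 * (label - data) ** 2
print(neg_logp - sq_err)  # constant 0.5*log(2*pi) in every element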
24,134
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
get_data
def get_data(path, activation):
    '''Get the dataset
    '''
    data = []
    image_names = []
    for filename in os.listdir(path):
        img = cv2.imread(os.path.join(path, filename), cv2.IMREAD_GRAYSCALE)
        image_names.append(filename)
        if img is not None:
            data.append(img)
    data = np.asarray(data)

    if activation == 'sigmoid':
        data = data.astype(np.float32)/(255.0)
    elif activation == 'tanh':
        data = data.astype(np.float32)/(255.0/2) - 1.0

    data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))
    np.random.seed(1234)
    p = np.random.permutation(data.shape[0])
    X = data[p]

    return X, image_names
python
def get_data(path, activation):
    '''Get the dataset
    '''
    data = []
    image_names = []
    for filename in os.listdir(path):
        img = cv2.imread(os.path.join(path, filename), cv2.IMREAD_GRAYSCALE)
        image_names.append(filename)
        if img is not None:
            data.append(img)
    data = np.asarray(data)

    if activation == 'sigmoid':
        data = data.astype(np.float32)/(255.0)
    elif activation == 'tanh':
        data = data.astype(np.float32)/(255.0/2) - 1.0

    data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))
    np.random.seed(1234)
    p = np.random.permutation(data.shape[0])
    X = data[p]

    return X, image_names
[ "def", "get_data", "(", "path", ",", "activation", ")", ":", "data", "=", "[", "]", "image_names", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "path", ")", ":", "img", "=", "cv2", ".", "imread", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ",", "cv2", ".", "IMREAD_GRAYSCALE", ")", "image_names", ".", "append", "(", "filename", ")", "if", "img", "is", "not", "None", ":", "data", ".", "append", "(", "img", ")", "data", "=", "np", ".", "asarray", "(", "data", ")", "if", "activation", "==", "'sigmoid'", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "float32", ")", "/", "(", "255.0", ")", "elif", "activation", "==", "'tanh'", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "float32", ")", "/", "(", "255.0", "/", "2", ")", "-", "1.0", "data", "=", "data", ".", "reshape", "(", "(", "data", ".", "shape", "[", "0", "]", ",", "1", ",", "data", ".", "shape", "[", "1", "]", ",", "data", ".", "shape", "[", "2", "]", ")", ")", "np", ".", "random", ".", "seed", "(", "1234", ")", "p", "=", "np", ".", "random", ".", "permutation", "(", "data", ".", "shape", "[", "0", "]", ")", "X", "=", "data", "[", "p", "]", "return", "X", ",", "image_names" ]
Get the dataset
[ "Get", "the", "dataset" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L213-L237
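Two pitfalls worth noting in this loader: image_names.append(filename) runs before the img is not None check, so a single unreadable file puts names and images out of step; and image_names is never reordered by the shuffle permutation p, so the returned names do not line up with the shuffled X either way. A corrected sketch of the loading loop (my variant under those observations, not the example's code):

import os
import cv2
import numpy as np

def load_grayscale_images(path):
    data, image_names = [], []
    for filename in sorted(os.listdir(path)):
        img = cv2.imread(os.path.join(path, filename), cv2.IMREAD_GRAYSCALE)
        if img is None:            # skip unreadable files *and* their names
            continue
        image_names.append(filename)
        data.append(img)
    return np.asarray(data), image_names

The normalization branches themselves are sound: /255.0 maps pixels into [0, 1] for a sigmoid decoder, and /(255.0/2) - 1.0 maps them into [-1, 1] for tanh.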
24,135
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
get_rmse_log
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
python
def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
[ "def", "get_rmse_log", "(", "net", ",", "X_train", ",", "y_train", ")", ":", "num_train", "=", "X_train", ".", "shape", "[", "0", "]", "clipped_preds", "=", "nd", ".", "clip", "(", "net", "(", "X_train", ")", ",", "1", ",", "float", "(", "'inf'", ")", ")", "return", "np", ".", "sqrt", "(", "2", "*", "nd", ".", "sum", "(", "square_loss", "(", "nd", ".", "log", "(", "clipped_preds", ")", ",", "nd", ".", "log", "(", "y_train", ")", ")", ")", ".", "asscalar", "(", ")", "/", "num_train", ")" ]
Gets root mse between the logarithms of the prediction and the truth.
[ "Gets", "root", "mse", "between", "the", "logarithms", "of", "the", "prediction", "and", "the", "truth", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L66-L71
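This is the Kaggle house-prices metric, RMSE on log prices: sqrt(1/n * sum_i (log p_i - log y_i)^2). Predictions are clipped to [1, inf) so the logarithm stays defined, and the factor of 2 presumably cancels the 1/2 that gluon's L2Loss builds in (assuming square_loss = gluon.loss.L2Loss() elsewhere in the script). A NumPy restatement under that assumption, with made-up prices:

import numpy as np

def rmse_log(preds, labels):
    preds = np.clip(preds, 1.0, None)  # keep the log well-defined
    return np.sqrt(np.mean((np.log(preds) - np.log(labels)) ** 2))

print(rmse_log(np.array([100000., 250000.]), np.array([120000., 240000.])))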
24,136
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
get_net
def get_net():
    """Gets a neural network. Better results are obtained with modifications."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(50, activation="relu"))
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net
python
def get_net():
    """Gets a neural network. Better results are obtained with modifications."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(50, activation="relu"))
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net
[ "def", "get_net", "(", ")", ":", "net", "=", "gluon", ".", "nn", ".", "Sequential", "(", ")", "with", "net", ".", "name_scope", "(", ")", ":", "net", ".", "add", "(", "gluon", ".", "nn", ".", "Dense", "(", "50", ",", "activation", "=", "\"relu\"", ")", ")", "net", ".", "add", "(", "gluon", ".", "nn", ".", "Dense", "(", "1", ")", ")", "net", ".", "initialize", "(", ")", "return", "net" ]
Gets a neural network. Better results are obtained with modifications.
[ "Gets", "a", "neural", "network", ".", "Better", "results", "are", "obtained", "with", "modifications", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L73-L80
24,137
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
train
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
                                            shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
            avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss
python
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
                                            shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    net.initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
            avg_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss
[ "def", "train", "(", "net", ",", "X_train", ",", "y_train", ",", "epochs", ",", "verbose_epoch", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", ":", "dataset_train", "=", "gluon", ".", "data", ".", "ArrayDataset", "(", "X_train", ",", "y_train", ")", "data_iter_train", "=", "gluon", ".", "data", ".", "DataLoader", "(", "dataset_train", ",", "batch_size", ",", "shuffle", "=", "True", ")", "trainer", "=", "gluon", ".", "Trainer", "(", "net", ".", "collect_params", "(", ")", ",", "'adam'", ",", "{", "'learning_rate'", ":", "learning_rate", ",", "'wd'", ":", "weight_decay", "}", ")", "net", ".", "initialize", "(", "force_reinit", "=", "True", ")", "for", "epoch", "in", "range", "(", "epochs", ")", ":", "for", "data", ",", "label", "in", "data_iter_train", ":", "with", "autograd", ".", "record", "(", ")", ":", "output", "=", "net", "(", "data", ")", "loss", "=", "square_loss", "(", "output", ",", "label", ")", "loss", ".", "backward", "(", ")", "trainer", ".", "step", "(", "batch_size", ")", "avg_loss", "=", "get_rmse_log", "(", "net", ",", "X_train", ",", "y_train", ")", "if", "epoch", ">", "verbose_epoch", ":", "print", "(", "\"Epoch %d, train loss: %f\"", "%", "(", "epoch", ",", "avg_loss", ")", ")", "return", "avg_loss" ]
Trains the model.
[ "Trains", "the", "model", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L82-L102
24,138
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
k_fold_cross_valid
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs,
                           verbose_epoch, learning_rate, weight_decay,
                           batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
python
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model."""
    assert k > 1
    fold_size = X_train.shape[0] // k
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        X_val_test = X_train[test_idx * fold_size: (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs,
                           verbose_epoch, learning_rate, weight_decay,
                           batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
[ "def", "k_fold_cross_valid", "(", "k", ",", "epochs", ",", "verbose_epoch", ",", "X_train", ",", "y_train", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", ":", "assert", "k", ">", "1", "fold_size", "=", "X_train", ".", "shape", "[", "0", "]", "//", "k", "train_loss_sum", "=", "0.0", "test_loss_sum", "=", "0.0", "for", "test_idx", "in", "range", "(", "k", ")", ":", "X_val_test", "=", "X_train", "[", "test_idx", "*", "fold_size", ":", "(", "test_idx", "+", "1", ")", "*", "fold_size", ",", ":", "]", "y_val_test", "=", "y_train", "[", "test_idx", "*", "fold_size", ":", "(", "test_idx", "+", "1", ")", "*", "fold_size", "]", "val_train_defined", "=", "False", "for", "i", "in", "range", "(", "k", ")", ":", "if", "i", "!=", "test_idx", ":", "X_cur_fold", "=", "X_train", "[", "i", "*", "fold_size", ":", "(", "i", "+", "1", ")", "*", "fold_size", ",", ":", "]", "y_cur_fold", "=", "y_train", "[", "i", "*", "fold_size", ":", "(", "i", "+", "1", ")", "*", "fold_size", "]", "if", "not", "val_train_defined", ":", "X_val_train", "=", "X_cur_fold", "y_val_train", "=", "y_cur_fold", "val_train_defined", "=", "True", "else", ":", "X_val_train", "=", "nd", ".", "concat", "(", "X_val_train", ",", "X_cur_fold", ",", "dim", "=", "0", ")", "y_val_train", "=", "nd", ".", "concat", "(", "y_val_train", ",", "y_cur_fold", ",", "dim", "=", "0", ")", "net", "=", "get_net", "(", ")", "train_loss", "=", "train", "(", "net", ",", "X_val_train", ",", "y_val_train", ",", "epochs", ",", "verbose_epoch", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", "train_loss_sum", "+=", "train_loss", "test_loss", "=", "get_rmse_log", "(", "net", ",", "X_val_test", ",", "y_val_test", ")", "print", "(", "\"Test loss: %f\"", "%", "test_loss", ")", "test_loss_sum", "+=", "test_loss", "return", "train_loss_sum", "/", "k", ",", "test_loss_sum", "/", "k" ]
Conducts k-fold cross validation for the model.
[ "Conducts", "k", "-", "fold", "cross", "validation", "for", "the", "model", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L104-L135
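Because fold_size = n // k, the last n mod k samples never land in any fold; acceptable for a tutorial, but worth knowing. The fold assembly could also be written without the val_train_defined flag by slicing index arrays, e.g. this hedged NumPy sketch (not the example's code):

import numpy as np

def fold_indices(n, k, test_idx):
    fold = n // k                  # remainder samples are dropped, as above
    test = np.arange(test_idx * fold, (test_idx + 1) * fold)
    train = np.setdiff1d(np.arange(k * fold), test)
    return train, test

train_idx, test_idx = fold_indices(n=103, k=5, test_idx=2)
print(len(train_idx), len(test_idx))  # 80 20 (3 leftover samples unused)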
24,139
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
learn
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
python
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set."""
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
[ "def", "learn", "(", "epochs", ",", "verbose_epoch", ",", "X_train", ",", "y_train", ",", "test", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", ":", "net", "=", "get_net", "(", ")", "_", "=", "train", "(", "net", ",", "X_train", ",", "y_train", ",", "epochs", ",", "verbose_epoch", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", "preds", "=", "net", "(", "X_test", ")", ".", "asnumpy", "(", ")", "test", "[", "'SalePrice'", "]", "=", "pd", ".", "Series", "(", "preds", ".", "reshape", "(", "1", ",", "-", "1", ")", "[", "0", "]", ")", "submission", "=", "pd", ".", "concat", "(", "[", "test", "[", "'Id'", "]", ",", "test", "[", "'SalePrice'", "]", "]", ",", "axis", "=", "1", ")", "submission", ".", "to_csv", "(", "'submission.csv'", ",", "index", "=", "False", ")" ]
Trains the model and predicts on the test data set.
[ "Trains", "the", "model", "and", "predicts", "on", "the", "test", "data", "set", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L152-L161
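Note that learn predicts on X_test, a name that is not among its parameters, so the snippet only works with a module-level X_test (the featurized test matrix built elsewhere in the script) in scope. A self-contained variant would take it explicitly; a hedged rewrite reusing get_net, train, and pd from the surrounding script:

def learn_explicit(epochs, verbose_epoch, X_train, y_train, X_test, test,
                   learning_rate, weight_decay, batch_size):
    net = get_net()
    train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size)
    test['SalePrice'] = pd.Series(net(X_test).asnumpy().reshape(-1))
    test[['Id', 'SalePrice']].to_csv('submission.csv', index=False)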
24,140
apache/incubator-mxnet
example/capsnet/capsulenet.py
do_training
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    """Perform CapsNet training"""
    summary_writer = SummaryWriter(args.tblog_dir)
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore,
                          optimizer=optimizer,
                          optimizer_params=optimizer_params)
    n_epoch = 0
    while True:
        if n_epoch >= num_epoch:
            break
        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()

        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)

        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss, train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))

        print('SAVE CHECKPOINT')
        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
python
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    """Perform CapsNet training"""
    summary_writer = SummaryWriter(args.tblog_dir)
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore,
                          optimizer=optimizer,
                          optimizer_params=optimizer_params)
    n_epoch = 0
    while True:
        if n_epoch >= num_epoch:
            break
        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()

        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)

        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss, train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))

        print('SAVE CHECKPOINT')
        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
[ "def", "do_training", "(", "num_epoch", ",", "optimizer", ",", "kvstore", ",", "learning_rate", ",", "model_prefix", ",", "decay", ")", ":", "summary_writer", "=", "SummaryWriter", "(", "args", ".", "tblog_dir", ")", "lr_scheduler", "=", "SimpleLRScheduler", "(", "learning_rate", ")", "optimizer_params", "=", "{", "'lr_scheduler'", ":", "lr_scheduler", "}", "module", ".", "init_params", "(", ")", "module", ".", "init_optimizer", "(", "kvstore", "=", "kvstore", ",", "optimizer", "=", "optimizer", ",", "optimizer_params", "=", "optimizer_params", ")", "n_epoch", "=", "0", "while", "True", ":", "if", "n_epoch", ">=", "num_epoch", ":", "break", "train_iter", ".", "reset", "(", ")", "val_iter", ".", "reset", "(", ")", "loss_metric", ".", "reset", "(", ")", "for", "n_batch", ",", "data_batch", "in", "enumerate", "(", "train_iter", ")", ":", "module", ".", "forward_backward", "(", "data_batch", ")", "module", ".", "update", "(", ")", "module", ".", "update_metric", "(", "loss_metric", ",", "data_batch", ".", "label", ")", "loss_metric", ".", "get_batch_log", "(", "n_batch", ")", "train_acc", ",", "train_loss", ",", "train_recon_err", "=", "loss_metric", ".", "get_name_value", "(", ")", "loss_metric", ".", "reset", "(", ")", "for", "n_batch", ",", "data_batch", "in", "enumerate", "(", "val_iter", ")", ":", "module", ".", "forward", "(", "data_batch", ")", "module", ".", "update_metric", "(", "loss_metric", ",", "data_batch", ".", "label", ")", "loss_metric", ".", "get_batch_log", "(", "n_batch", ")", "val_acc", ",", "val_loss", ",", "val_recon_err", "=", "loss_metric", ".", "get_name_value", "(", ")", "summary_writer", ".", "add_scalar", "(", "'train_acc'", ",", "train_acc", ",", "n_epoch", ")", "summary_writer", ".", "add_scalar", "(", "'train_loss'", ",", "train_loss", ",", "n_epoch", ")", "summary_writer", ".", "add_scalar", "(", "'train_recon_err'", ",", "train_recon_err", ",", "n_epoch", ")", "summary_writer", ".", "add_scalar", "(", "'val_acc'", ",", "val_acc", ",", "n_epoch", ")", "summary_writer", ".", "add_scalar", "(", "'val_loss'", ",", "val_loss", ",", "n_epoch", ")", "summary_writer", ".", "add_scalar", "(", "'val_recon_err'", ",", "val_recon_err", ",", "n_epoch", ")", "print", "(", "'Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f'", "%", "(", "n_epoch", ",", "train_acc", ",", "train_loss", ",", "train_recon_err", ")", ")", "print", "(", "'Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f'", "%", "(", "n_epoch", ",", "val_acc", ",", "val_loss", ",", "val_recon_err", ")", ")", "print", "(", "'SAVE CHECKPOINT'", ")", "module", ".", "save_checkpoint", "(", "prefix", "=", "model_prefix", ",", "epoch", "=", "n_epoch", ")", "n_epoch", "+=", "1", "lr_scheduler", ".", "learning_rate", "=", "learning_rate", "*", "(", "decay", "**", "n_epoch", ")" ]
Perform CapsNet training
[ "Perform", "CapsNet", "training" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L195-L238
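For orientation, a hedged sketch of the call shape. The function depends on module-level globals (`module`, `train_iter`, `val_iter`, `loss_metric`, `args`) prepared earlier in capsulenet.py; the argument values below are illustrative:

# assumes the script's globals are already constructed and bound
do_training(num_epoch=100, optimizer='adam', kvstore='device',
            learning_rate=0.001, model_prefix='capsnet', decay=0.99)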
24,141
apache/incubator-mxnet
example/capsnet/capsulenet.py
LossMetric.update
def update(self, labels, preds):
    """Update the hyper-parameters and loss of CapsNet"""
    batch_sum_metric = 0
    batch_num_inst = 0
    for label, pred_outcaps in zip(labels[0], preds[0]):
        label_np = int(label.asnumpy())
        pred_label = int(np.argmax(pred_outcaps.asnumpy()))
        batch_sum_metric += int(label_np == pred_label)
        batch_num_inst += 1
    batch_loss = preds[1].asnumpy()
    recon_loss = preds[2].asnumpy()
    self.sum_metric += batch_sum_metric
    self.num_inst += batch_num_inst
    self.loss += batch_loss
    self.recon_loss += recon_loss
    self.batch_sum_metric = batch_sum_metric
    self.batch_num_inst = batch_num_inst
    self.batch_loss = batch_loss
    self.n_batch += 1
python
def update(self, labels, preds):
    """Update the hyper-parameters and loss of CapsNet"""
    batch_sum_metric = 0
    batch_num_inst = 0
    for label, pred_outcaps in zip(labels[0], preds[0]):
        label_np = int(label.asnumpy())
        pred_label = int(np.argmax(pred_outcaps.asnumpy()))
        batch_sum_metric += int(label_np == pred_label)
        batch_num_inst += 1
    batch_loss = preds[1].asnumpy()
    recon_loss = preds[2].asnumpy()
    self.sum_metric += batch_sum_metric
    self.num_inst += batch_num_inst
    self.loss += batch_loss
    self.recon_loss += recon_loss
    self.batch_sum_metric = batch_sum_metric
    self.batch_num_inst = batch_num_inst
    self.batch_loss = batch_loss
    self.n_batch += 1
[ "def", "update", "(", "self", ",", "labels", ",", "preds", ")", ":", "batch_sum_metric", "=", "0", "batch_num_inst", "=", "0", "for", "label", ",", "pred_outcaps", "in", "zip", "(", "labels", "[", "0", "]", ",", "preds", "[", "0", "]", ")", ":", "label_np", "=", "int", "(", "label", ".", "asnumpy", "(", ")", ")", "pred_label", "=", "int", "(", "np", ".", "argmax", "(", "pred_outcaps", ".", "asnumpy", "(", ")", ")", ")", "batch_sum_metric", "+=", "int", "(", "label_np", "==", "pred_label", ")", "batch_num_inst", "+=", "1", "batch_loss", "=", "preds", "[", "1", "]", ".", "asnumpy", "(", ")", "recon_loss", "=", "preds", "[", "2", "]", ".", "asnumpy", "(", ")", "self", ".", "sum_metric", "+=", "batch_sum_metric", "self", ".", "num_inst", "+=", "batch_num_inst", "self", ".", "loss", "+=", "batch_loss", "self", ".", "recon_loss", "+=", "recon_loss", "self", ".", "batch_sum_metric", "=", "batch_sum_metric", "self", ".", "batch_num_inst", "=", "batch_num_inst", "self", ".", "batch_loss", "=", "batch_loss", "self", ".", "n_batch", "+=", "1" ]
Update the hyper-parameters and loss of CapsNet
[ "Update", "the", "hyper", "-", "parameters", "and", "loss", "of", "CapsNet" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L140-L158
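A hedged sketch of how this metric is fed, mirroring the labels/preds layout the record expects (preds[0] = per-class capsule scores, preds[1] = margin loss, preds[2] = reconstruction loss). The arrays are dummies and `loss_metric` is assumed to be an initialized LossMetric from this script:

import mxnet as mx

labels = [mx.nd.array([3, 1])]                       # one class id per sample
preds = [mx.nd.random.uniform(shape=(2, 10)),        # output capsule scores
         mx.nd.array([0.12]),                        # batch margin loss
         mx.nd.array([0.05])]                        # batch reconstruction loss
loss_metric.update(labels, preds)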
24,142
apache/incubator-mxnet
example/capsnet/capsulenet.py
MNISTCustomIter.next
def next(self):
    """Generate next of iterator"""
    if self.iter_next():
        if self.is_train:
            data_raw_list = self.getdata()
            data_shifted = []
            for data_raw in data_raw_list[0]:
                data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
            return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
                                   pad=self.getpad(), index=None)
        else:
            return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
                                   pad=self.getpad(), index=None)
    else:
        raise StopIteration
python
def next(self):
    """Generate next of iterator"""
    if self.iter_next():
        if self.is_train:
            data_raw_list = self.getdata()
            data_shifted = []
            for data_raw in data_raw_list[0]:
                data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
            return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
                                   pad=self.getpad(), index=None)
        else:
            return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(),
                                   pad=self.getpad(), index=None)
    else:
        raise StopIteration
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "iter_next", "(", ")", ":", "if", "self", ".", "is_train", ":", "data_raw_list", "=", "self", ".", "getdata", "(", ")", "data_shifted", "=", "[", "]", "for", "data_raw", "in", "data_raw_list", "[", "0", "]", ":", "data_shifted", ".", "append", "(", "random_shift", "(", "data_raw", ".", "asnumpy", "(", ")", ",", "0.1", ",", "0.1", ")", ")", "return", "mx", ".", "io", ".", "DataBatch", "(", "data", "=", "[", "mx", ".", "nd", ".", "array", "(", "data_shifted", ")", "]", ",", "label", "=", "self", ".", "getlabel", "(", ")", ",", "pad", "=", "self", ".", "getpad", "(", ")", ",", "index", "=", "None", ")", "else", ":", "return", "mx", ".", "io", ".", "DataBatch", "(", "data", "=", "self", ".", "getdata", "(", ")", ",", "label", "=", "self", ".", "getlabel", "(", ")", ",", "pad", "=", "self", ".", "getpad", "(", ")", ",", "index", "=", "None", ")", "else", ":", "raise", "StopIteration" ]
Generate next of iterator
[ "Generate", "next", "of", "iterator" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/capsnet/capsulenet.py#L304-L318
24,143
apache/incubator-mxnet
python/mxnet/attribute.py
AttrScope.get
def get(self, attr):
    """
    Get the attribute dict given the attribute set by the symbol.

    Parameters
    ----------
    attr : dict of string to string
        The attribute passed in by user during symbol creation.

    Returns
    -------
    attr : dict of string to string
        Updated attributes to add other scope related attributes.
    """
    if self._attr:
        ret = self._attr.copy()
        if attr:
            ret.update(attr)
        return ret
    else:
        return attr if attr else {}
python
def get(self, attr):
    """
    Get the attribute dict given the attribute set by the symbol.

    Parameters
    ----------
    attr : dict of string to string
        The attribute passed in by user during symbol creation.

    Returns
    -------
    attr : dict of string to string
        Updated attributes to add other scope related attributes.
    """
    if self._attr:
        ret = self._attr.copy()
        if attr:
            ret.update(attr)
        return ret
    else:
        return attr if attr else {}
[ "def", "get", "(", "self", ",", "attr", ")", ":", "if", "self", ".", "_attr", ":", "ret", "=", "self", ".", "_attr", ".", "copy", "(", ")", "if", "attr", ":", "ret", ".", "update", "(", "attr", ")", "return", "ret", "else", ":", "return", "attr", "if", "attr", "else", "{", "}" ]
Get the attribute dict given the attribute set by the symbol.

Parameters
----------
attr : dict of string to string
    The attribute passed in by user during symbol creation.

Returns
-------
attr : dict of string to string
    Updated attributes to add other scope related attributes.
[ "Get", "the", "attribute", "dict", "given", "the", "attribute", "set", "by", "the", "symbol", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/attribute.py#L47-L67
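A small, safe-to-run sketch of the merge semantics: scope attributes form the base dict and user-supplied attributes override them:

from mxnet.attribute import AttrScope

scope = AttrScope(group='backbone', lr_mult='1')
print(scope.get({'lr_mult': '0.1'}))  # {'group': 'backbone', 'lr_mult': '0.1'}
print(scope.get(None))                # {'group': 'backbone', 'lr_mult': '1'}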
24,144
apache/incubator-mxnet
python/mxnet/model.py
_create_sparse_kvstore
def _create_sparse_kvstore(kvstore):
    """Create kvstore assuming some parameters' storage types are row_sparse.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.

    Returns
    -------
    kvstore : KVStore
    update_on_kvstore : bool. Always True.
    """
    # always update on kvstore
    update_on_kvstore = True
    if isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        kv = kvs.create(kvstore)
    else:
        raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
                        "The type must be KVStore or str." % kvstore)
    return (kv, update_on_kvstore)
python
def _create_sparse_kvstore(kvstore):
    """Create kvstore assuming some parameters' storage types are row_sparse.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.

    Returns
    -------
    kvstore : KVStore
    update_on_kvstore : bool. Always True.
    """
    # always update on kvstore
    update_on_kvstore = True
    if isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        kv = kvs.create(kvstore)
    else:
        raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
                        "The type must be KVStore or str." % kvstore)
    return (kv, update_on_kvstore)
[ "def", "_create_sparse_kvstore", "(", "kvstore", ")", ":", "# always update on kvstore", "update_on_kvstore", "=", "True", "if", "isinstance", "(", "kvstore", ",", "kvs", ".", "KVStore", ")", ":", "kv", "=", "kvstore", "elif", "isinstance", "(", "kvstore", ",", "str", ")", ":", "kv", "=", "kvs", ".", "create", "(", "kvstore", ")", "else", ":", "raise", "TypeError", "(", "\"Cannot create '%s' KVStore with row_sparse parameters. \"", "\"The type must be KVStore or str.\"", "%", "kvstore", ")", "return", "(", "kv", ",", "update_on_kvstore", ")" ]
Create kvstore assuming some parameters' storage types are row_sparse.

Parameters
----------
kvstore : KVStore or str
    The kvstore.

Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
[ "Create", "kvstore", "assuming", "some", "parameters", "storage", "types", "are", "row_sparse", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L58-L80
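A hedged example of the private helper's contract, using the string form; the second return value is always True on this path:

from mxnet.model import _create_sparse_kvstore  # private helper

kv, update_on_kvstore = _create_sparse_kvstore('device')
assert update_on_kvstore        # row_sparse parameters always update on kvstore
print(type(kv))                 # an mxnet.kvstore.KVStore instance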
24,145
apache/incubator-mxnet
python/mxnet/model.py
_create_kvstore
def _create_kvstore(kvstore, num_device, arg_params):
    """Create kvstore.

    This function selects and creates a proper kvstore given the kvstore type.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.
    num_device : int
        The number of devices
    arg_params : dict of str to `NDArray`
        Model parameter, dict of name to `NDArray` of net's weights.
    """
    update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    if kvstore is None:
        kv = None
    elif isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        # create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
            # no need to use kv for single device and single machine
            kv = None
        else:
            kv = kvs.create(kvstore)
            if kvstore == 'local':
                # automatically select a proper local
                max_size = max(np.prod(param.shape) for param in
                               arg_params.values())
                if max_size > 1024 * 1024 * 16:
                    update_on_kvstore = False
    else:
        raise TypeError('kvstore must be KVStore, str or None')

    if kv is None:
        update_on_kvstore = False

    return (kv, update_on_kvstore)
python
def _create_kvstore(kvstore, num_device, arg_params):
    """Create kvstore.

    This function selects and creates a proper kvstore given the kvstore type.

    Parameters
    ----------
    kvstore : KVStore or str
        The kvstore.
    num_device : int
        The number of devices
    arg_params : dict of str to `NDArray`
        Model parameter, dict of name to `NDArray` of net's weights.
    """
    update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    if kvstore is None:
        kv = None
    elif isinstance(kvstore, kvs.KVStore):
        kv = kvstore
    elif isinstance(kvstore, str):
        # create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
            # no need to use kv for single device and single machine
            kv = None
        else:
            kv = kvs.create(kvstore)
            if kvstore == 'local':
                # automatically select a proper local
                max_size = max(np.prod(param.shape) for param in
                               arg_params.values())
                if max_size > 1024 * 1024 * 16:
                    update_on_kvstore = False
    else:
        raise TypeError('kvstore must be KVStore, str or None')

    if kv is None:
        update_on_kvstore = False

    return (kv, update_on_kvstore)
[ "def", "_create_kvstore", "(", "kvstore", ",", "num_device", ",", "arg_params", ")", ":", "update_on_kvstore", "=", "bool", "(", "int", "(", "os", ".", "getenv", "(", "'MXNET_UPDATE_ON_KVSTORE'", ",", "\"1\"", ")", ")", ")", "if", "kvstore", "is", "None", ":", "kv", "=", "None", "elif", "isinstance", "(", "kvstore", ",", "kvs", ".", "KVStore", ")", ":", "kv", "=", "kvstore", "elif", "isinstance", "(", "kvstore", ",", "str", ")", ":", "# create kvstore using the string type", "if", "num_device", "==", "1", "and", "'dist'", "not", "in", "kvstore", ":", "# no need to use kv for single device and single machine", "kv", "=", "None", "else", ":", "kv", "=", "kvs", ".", "create", "(", "kvstore", ")", "if", "kvstore", "==", "'local'", ":", "# automatically select a proper local", "max_size", "=", "max", "(", "np", ".", "prod", "(", "param", ".", "shape", ")", "for", "param", "in", "arg_params", ".", "values", "(", ")", ")", "if", "max_size", ">", "1024", "*", "1024", "*", "16", ":", "update_on_kvstore", "=", "False", "else", ":", "raise", "TypeError", "(", "'kvstore must be KVStore, str or None'", ")", "if", "kv", "is", "None", ":", "update_on_kvstore", "=", "False", "return", "(", "kv", ",", "update_on_kvstore", ")" ]
Create kvstore.

This function selects and creates a proper kvstore given the kvstore type.

Parameters
----------
kvstore : KVStore or str
    The kvstore.
num_device : int
    The number of devices
arg_params : dict of str to `NDArray`
    Model parameter, dict of name to `NDArray` of net's weights.
[ "Create", "kvstore", "This", "function", "select", "and", "create", "a", "proper", "kvstore", "if", "given", "the", "kvstore", "type", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L82-L119
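A hedged illustration of the selection logic: a single device with a non-distributed store yields no kvstore at all, while multiple devices get a real one (the parameter dict is a dummy):

from mxnet.model import _create_kvstore  # private helper
import mxnet.ndarray as nd

arg_params = {'fc_weight': nd.zeros((128, 64))}
kv, on_kv = _create_kvstore('local', num_device=1, arg_params=arg_params)
print(kv, on_kv)            # None False: single device, single machine
kv, on_kv = _create_kvstore('local', num_device=2, arg_params=arg_params)
print(kv is not None)       # True: gradients are aggregated through the store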
24,146
apache/incubator-mxnet
python/mxnet/model.py
_update_params_on_kvstore_nccl
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
    """Perform update of param_arrays from grad_arrays on NCCL kvstore."""
    valid_indices = [index for index, grad_list in
                     enumerate(grad_arrays) if grad_list[0] is not None]
    valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
    valid_param_arrays = [param_arrays[i] for i in valid_indices]
    valid_param_names = [param_names[i] for i in valid_indices]
    size = len(valid_grad_arrays)
    start = 0
    # Use aggregation by default only with NCCL
    default_batch = '16'
    batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
    while start < size:
        end = start + batch if start + batch < size else size
        # push gradient, priority is negative index
        kvstore.push(valid_param_names[start:end],
                     valid_grad_arrays[start:end], priority=-start)
        # pull back the weights
        kvstore.pull(valid_param_names[start:end],
                     valid_param_arrays[start:end], priority=-start)
        start = end
python
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
    """Perform update of param_arrays from grad_arrays on NCCL kvstore."""
    valid_indices = [index for index, grad_list in
                     enumerate(grad_arrays) if grad_list[0] is not None]
    valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
    valid_param_arrays = [param_arrays[i] for i in valid_indices]
    valid_param_names = [param_names[i] for i in valid_indices]
    size = len(valid_grad_arrays)
    start = 0
    # Use aggregation by default only with NCCL
    default_batch = '16'
    batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
    while start < size:
        end = start + batch if start + batch < size else size
        # push gradient, priority is negative index
        kvstore.push(valid_param_names[start:end],
                     valid_grad_arrays[start:end], priority=-start)
        # pull back the weights
        kvstore.pull(valid_param_names[start:end],
                     valid_param_arrays[start:end], priority=-start)
        start = end
[ "def", "_update_params_on_kvstore_nccl", "(", "param_arrays", ",", "grad_arrays", ",", "kvstore", ",", "param_names", ")", ":", "valid_indices", "=", "[", "index", "for", "index", ",", "grad_list", "in", "enumerate", "(", "grad_arrays", ")", "if", "grad_list", "[", "0", "]", "is", "not", "None", "]", "valid_grad_arrays", "=", "[", "grad_arrays", "[", "i", "]", "for", "i", "in", "valid_indices", "]", "valid_param_arrays", "=", "[", "param_arrays", "[", "i", "]", "for", "i", "in", "valid_indices", "]", "valid_param_names", "=", "[", "param_names", "[", "i", "]", "for", "i", "in", "valid_indices", "]", "size", "=", "len", "(", "valid_grad_arrays", ")", "start", "=", "0", "# Use aggregation by default only with NCCL", "default_batch", "=", "'16'", "batch", "=", "int", "(", "os", ".", "getenv", "(", "'MXNET_UPDATE_AGGREGATION_SIZE'", ",", "default_batch", ")", ")", "while", "start", "<", "size", ":", "end", "=", "start", "+", "batch", "if", "start", "+", "batch", "<", "size", "else", "size", "# push gradient, priority is negative index", "kvstore", ".", "push", "(", "valid_param_names", "[", "start", ":", "end", "]", ",", "valid_grad_arrays", "[", "start", ":", "end", "]", ",", "priority", "=", "-", "start", ")", "# pull back the weights", "kvstore", ".", "pull", "(", "valid_param_names", "[", "start", ":", "end", "]", ",", "valid_param_arrays", "[", "start", ":", "end", "]", ",", "priority", "=", "-", "start", ")", "start", "=", "end" ]
Perform update of param_arrays from grad_arrays on NCCL kvstore.
[ "Perform", "update", "of", "param_arrays", "from", "grad_arrays", "on", "NCCL", "kvstore", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L130-L148
24,147
apache/incubator-mxnet
python/mxnet/model.py
_update_params_on_kvstore
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
    """Perform update of param_arrays from grad_arrays on kvstore."""
    for index, pair in enumerate(zip(param_arrays, grad_arrays)):
        arg_list, grad_list = pair
        if grad_list[0] is None:
            continue
        name = param_names[index]
        # push gradient, priority is negative index
        kvstore.push(name, grad_list, priority=-index)
        # pull back the weights
        kvstore.pull(name, arg_list, priority=-index)
python
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
    """Perform update of param_arrays from grad_arrays on kvstore."""
    for index, pair in enumerate(zip(param_arrays, grad_arrays)):
        arg_list, grad_list = pair
        if grad_list[0] is None:
            continue
        name = param_names[index]
        # push gradient, priority is negative index
        kvstore.push(name, grad_list, priority=-index)
        # pull back the weights
        kvstore.pull(name, arg_list, priority=-index)
[ "def", "_update_params_on_kvstore", "(", "param_arrays", ",", "grad_arrays", ",", "kvstore", ",", "param_names", ")", ":", "for", "index", ",", "pair", "in", "enumerate", "(", "zip", "(", "param_arrays", ",", "grad_arrays", ")", ")", ":", "arg_list", ",", "grad_list", "=", "pair", "if", "grad_list", "[", "0", "]", "is", "None", ":", "continue", "name", "=", "param_names", "[", "index", "]", "# push gradient, priority is negative index", "kvstore", ".", "push", "(", "name", ",", "grad_list", ",", "priority", "=", "-", "index", ")", "# pull back the weights", "kvstore", ".", "pull", "(", "name", ",", "arg_list", ",", "priority", "=", "-", "index", ")" ]
Perform update of param_arrays from grad_arrays on kvstore.
[ "Perform", "update", "of", "param_arrays", "from", "grad_arrays", "on", "kvstore", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L150-L160
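The push/pull pattern the helper wraps, reduced to one parameter on the public KVStore API (all values are dummies):

import mxnet as mx

kv = mx.kv.create('local')
weight = mx.nd.ones((2, 2))
grads = [mx.nd.ones((2, 2)) * 0.5]       # one gradient copy per device
kv.init('fc_weight', weight)
kv.push('fc_weight', grads, priority=0)   # send gradients to the store
kv.pull('fc_weight', [weight], priority=0)  # fetch the merged result back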
24,148
apache/incubator-mxnet
python/mxnet/model.py
_update_params
def _update_params(param_arrays, grad_arrays, updater, num_device,
                   kvstore=None, param_names=None):
    """Perform update of param_arrays from grad_arrays not on kvstore."""
    updates = [[] for _ in range(num_device)]
    for i, pair in enumerate(zip(param_arrays, grad_arrays)):
        arg_list, grad_list = pair
        if grad_list[0] is None:
            continue
        index = i
        if kvstore:
            name = param_names[index]
            # push gradient, priority is negative index
            kvstore.push(name, grad_list, priority=-index)
            # pull back the sum gradients, to the same locations.
            kvstore.pull(name, grad_list, priority=-index)
        for k, p in enumerate(zip(arg_list, grad_list)):
            # faked an index here, to make optimizer create diff
            # state for the same index but on diff devs, TODO(mli)
            # use a better solution later
            w, g = p
            updates[k].append((index*num_device+k, g, w))
    for dev_updates in updates:
        # update params if param_arrays and grad_arrays are not empty
        if dev_updates:
            i, w, g = zip(*dev_updates)
            updater(i, w, g)
python
def _update_params(param_arrays, grad_arrays, updater, num_device,
                   kvstore=None, param_names=None):
    """Perform update of param_arrays from grad_arrays not on kvstore."""
    updates = [[] for _ in range(num_device)]
    for i, pair in enumerate(zip(param_arrays, grad_arrays)):
        arg_list, grad_list = pair
        if grad_list[0] is None:
            continue
        index = i
        if kvstore:
            name = param_names[index]
            # push gradient, priority is negative index
            kvstore.push(name, grad_list, priority=-index)
            # pull back the sum gradients, to the same locations.
            kvstore.pull(name, grad_list, priority=-index)
        for k, p in enumerate(zip(arg_list, grad_list)):
            # faked an index here, to make optimizer create diff
            # state for the same index but on diff devs, TODO(mli)
            # use a better solution later
            w, g = p
            updates[k].append((index*num_device+k, g, w))
    for dev_updates in updates:
        # update params if param_arrays and grad_arrays are not empty
        if dev_updates:
            i, w, g = zip(*dev_updates)
            updater(i, w, g)
[ "def", "_update_params", "(", "param_arrays", ",", "grad_arrays", ",", "updater", ",", "num_device", ",", "kvstore", "=", "None", ",", "param_names", "=", "None", ")", ":", "updates", "=", "[", "[", "]", "for", "_", "in", "range", "(", "num_device", ")", "]", "for", "i", ",", "pair", "in", "enumerate", "(", "zip", "(", "param_arrays", ",", "grad_arrays", ")", ")", ":", "arg_list", ",", "grad_list", "=", "pair", "if", "grad_list", "[", "0", "]", "is", "None", ":", "continue", "index", "=", "i", "if", "kvstore", ":", "name", "=", "param_names", "[", "index", "]", "# push gradient, priority is negative index", "kvstore", ".", "push", "(", "name", ",", "grad_list", ",", "priority", "=", "-", "index", ")", "# pull back the sum gradients, to the same locations.", "kvstore", ".", "pull", "(", "name", ",", "grad_list", ",", "priority", "=", "-", "index", ")", "for", "k", ",", "p", "in", "enumerate", "(", "zip", "(", "arg_list", ",", "grad_list", ")", ")", ":", "# faked an index here, to make optimizer create diff", "# state for the same index but on diff devs, TODO(mli)", "# use a better solution later", "w", ",", "g", "=", "p", "updates", "[", "k", "]", ".", "append", "(", "(", "index", "*", "num_device", "+", "k", ",", "g", ",", "w", ")", ")", "for", "dev_updates", "in", "updates", ":", "# update params if param_arrays and grad_arrays are not empty", "if", "dev_updates", ":", "i", ",", "w", ",", "g", "=", "zip", "(", "*", "dev_updates", ")", "updater", "(", "i", ",", "w", ",", "g", ")" ]
Perform update of param_arrays from grad_arrays not on kvstore.
[ "Perform", "update", "of", "param_arrays", "from", "grad_arrays", "not", "on", "kvstore", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L162-L187
24,149
apache/incubator-mxnet
python/mxnet/model.py
_multiple_callbacks
def _multiple_callbacks(callbacks, *args, **kwargs):
    """Sends args and kwargs to any configured callbacks.
    This handles the cases where the 'callbacks' variable
    is ``None``, a single function, or a list.
    """
    if isinstance(callbacks, list):
        for cb in callbacks:
            cb(*args, **kwargs)
        return
    if callbacks:
        callbacks(*args, **kwargs)
python
def _multiple_callbacks(callbacks, *args, **kwargs):
    """Sends args and kwargs to any configured callbacks.
    This handles the cases where the 'callbacks' variable
    is ``None``, a single function, or a list.
    """
    if isinstance(callbacks, list):
        for cb in callbacks:
            cb(*args, **kwargs)
        return
    if callbacks:
        callbacks(*args, **kwargs)
[ "def", "_multiple_callbacks", "(", "callbacks", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "callbacks", ",", "list", ")", ":", "for", "cb", "in", "callbacks", ":", "cb", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "if", "callbacks", ":", "callbacks", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Sends args and kwargs to any configured callbacks. This handles the cases where the 'callbacks' variable is ``None``, a single function, or a list.
[ "Sends", "args", "and", "kwargs", "to", "any", "configured", "callbacks", ".", "This", "handles", "the", "cases", "where", "the", "callbacks", "variable", "is", "None", "a", "single", "function", "or", "a", "list", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L190-L200
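A self-contained sketch of the three accepted shapes (None, a single callable, a list), using a toy callback:

from mxnet.model import _multiple_callbacks  # private helper

events = []
_multiple_callbacks(events.append, 'batch_end')           # single function
_multiple_callbacks([events.append, events.append], 'x')  # list of functions
_multiple_callbacks(None, 'ignored')                      # None: no-op
print(events)   # ['batch_end', 'x', 'x']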
24,150
apache/incubator-mxnet
python/mxnet/model.py
save_checkpoint
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
    """Checkpoint the model data into file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input Symbol.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    if symbol is not None:
        symbol.save('%s-symbol.json' % prefix)

    save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
    save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
    param_name = '%s-%04d.params' % (prefix, epoch)
    nd.save(param_name, save_dict)
    logging.info('Saved checkpoint to \"%s\"', param_name)
python
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
    """Checkpoint the model data into file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input Symbol.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    if symbol is not None:
        symbol.save('%s-symbol.json' % prefix)

    save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
    save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
    param_name = '%s-%04d.params' % (prefix, epoch)
    nd.save(param_name, save_dict)
    logging.info('Saved checkpoint to \"%s\"', param_name)
[ "def", "save_checkpoint", "(", "prefix", ",", "epoch", ",", "symbol", ",", "arg_params", ",", "aux_params", ")", ":", "if", "symbol", "is", "not", "None", ":", "symbol", ".", "save", "(", "'%s-symbol.json'", "%", "prefix", ")", "save_dict", "=", "{", "(", "'arg:%s'", "%", "k", ")", ":", "v", ".", "as_in_context", "(", "cpu", "(", ")", ")", "for", "k", ",", "v", "in", "arg_params", ".", "items", "(", ")", "}", "save_dict", ".", "update", "(", "{", "(", "'aux:%s'", "%", "k", ")", ":", "v", ".", "as_in_context", "(", "cpu", "(", ")", ")", "for", "k", ",", "v", "in", "aux_params", ".", "items", "(", ")", "}", ")", "param_name", "=", "'%s-%04d.params'", "%", "(", "prefix", ",", "epoch", ")", "nd", ".", "save", "(", "param_name", ",", "save_dict", ")", "logging", ".", "info", "(", "'Saved checkpoint to \\\"%s\\\"'", ",", "param_name", ")" ]
Checkpoint the model data into file.

Parameters
----------
prefix : str
    Prefix of model name.
epoch : int
    The epoch number of the model.
symbol : Symbol
    The input Symbol.
arg_params : dict of str to NDArray
    Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
    Model parameter, dict of name to NDArray of net's auxiliary states.

Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
[ "Checkpoint", "the", "model", "data", "into", "file", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L394-L421
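A runnable sketch that checkpoints a one-layer symbol; the parameter shapes are made up to match the symbol:

import mxnet as mx

data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
arg_params = {'fc_weight': mx.nd.zeros((10, 4)),
              'fc_bias': mx.nd.zeros((10,))}
mx.model.save_checkpoint('mymodel', 0, net, arg_params, aux_params={})
# -> writes mymodel-symbol.json and mymodel-0000.params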
24,151
apache/incubator-mxnet
python/mxnet/model.py
FeedForward._check_arguments
def _check_arguments(self):
    """verify the argument of the default symbol and user provided parameters"""
    if self.argument_checked:
        return

    assert(self.symbol is not None)
    self.argument_checked = True

    # check if symbol contain duplicated names.
    _check_arguments(self.symbol)

    # rematch parameters to delete useless ones
    if self.allow_extra_params:
        if self.arg_params:
            arg_names = set(self.symbol.list_arguments())
            self.arg_params = {k : v for k, v in self.arg_params.items()
                               if k in arg_names}
        if self.aux_params:
            aux_names = set(self.symbol.list_auxiliary_states())
            self.aux_params = {k : v for k, v in self.aux_params.items()
                               if k in aux_names}
python
def _check_arguments(self):
    """verify the argument of the default symbol and user provided parameters"""
    if self.argument_checked:
        return

    assert(self.symbol is not None)
    self.argument_checked = True

    # check if symbol contain duplicated names.
    _check_arguments(self.symbol)

    # rematch parameters to delete useless ones
    if self.allow_extra_params:
        if self.arg_params:
            arg_names = set(self.symbol.list_arguments())
            self.arg_params = {k : v for k, v in self.arg_params.items()
                               if k in arg_names}
        if self.aux_params:
            aux_names = set(self.symbol.list_auxiliary_states())
            self.aux_params = {k : v for k, v in self.aux_params.items()
                               if k in aux_names}
[ "def", "_check_arguments", "(", "self", ")", ":", "if", "self", ".", "argument_checked", ":", "return", "assert", "(", "self", ".", "symbol", "is", "not", "None", ")", "self", ".", "argument_checked", "=", "True", "# check if symbol contain duplicated names.", "_check_arguments", "(", "self", ".", "symbol", ")", "# rematch parameters to delete useless ones", "if", "self", ".", "allow_extra_params", ":", "if", "self", ".", "arg_params", ":", "arg_names", "=", "set", "(", "self", ".", "symbol", ".", "list_arguments", "(", ")", ")", "self", ".", "arg_params", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "arg_params", ".", "items", "(", ")", "if", "k", "in", "arg_names", "}", "if", "self", ".", "aux_params", ":", "aux_names", "=", "set", "(", "self", ".", "symbol", ".", "list_auxiliary_states", "(", ")", ")", "self", ".", "aux_params", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "aux_params", ".", "items", "(", ")", "if", "k", "in", "aux_names", "}" ]
verify the argument of the default symbol and user provided parameters
[ "verify", "the", "argument", "of", "the", "default", "symbol", "and", "user", "provided", "parameters" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L546-L565
24,152
apache/incubator-mxnet
python/mxnet/model.py
FeedForward._init_params
def _init_params(self, inputs, overwrite=False):
    """Initialize weight parameters and auxiliary states."""
    inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
    input_shapes = {item.name: item.shape for item in inputs}
    arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
    assert arg_shapes is not None
    input_dtypes = {item.name: item.dtype for item in inputs}
    arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
    assert arg_dtypes is not None

    arg_names = self.symbol.list_arguments()
    input_names = input_shapes.keys()
    param_names = [key for key in arg_names if key not in input_names]
    aux_names = self.symbol.list_auxiliary_states()

    param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
                        if x[0] in param_names]
    arg_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in param_name_attrs}
    aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
                      if x[0] in aux_names]
    aux_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in aux_name_attrs}

    for k, v in arg_params.items():
        if self.arg_params and k in self.arg_params and (not overwrite):
            arg_params[k][:] = self.arg_params[k][:]
        else:
            self.initializer(k, v)

    for k, v in aux_params.items():
        if self.aux_params and k in self.aux_params and (not overwrite):
            aux_params[k][:] = self.aux_params[k][:]
        else:
            self.initializer(k, v)

    self.arg_params = arg_params
    self.aux_params = aux_params
    return (arg_names, list(param_names), aux_names)
python
def _init_params(self, inputs, overwrite=False):
    """Initialize weight parameters and auxiliary states."""
    inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
    input_shapes = {item.name: item.shape for item in inputs}
    arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
    assert arg_shapes is not None
    input_dtypes = {item.name: item.dtype for item in inputs}
    arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
    assert arg_dtypes is not None

    arg_names = self.symbol.list_arguments()
    input_names = input_shapes.keys()
    param_names = [key for key in arg_names if key not in input_names]
    aux_names = self.symbol.list_auxiliary_states()

    param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
                        if x[0] in param_names]
    arg_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in param_name_attrs}
    aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
                      if x[0] in aux_names]
    aux_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in aux_name_attrs}

    for k, v in arg_params.items():
        if self.arg_params and k in self.arg_params and (not overwrite):
            arg_params[k][:] = self.arg_params[k][:]
        else:
            self.initializer(k, v)

    for k, v in aux_params.items():
        if self.aux_params and k in self.aux_params and (not overwrite):
            aux_params[k][:] = self.aux_params[k][:]
        else:
            self.initializer(k, v)

    self.arg_params = arg_params
    self.aux_params = aux_params
    return (arg_names, list(param_names), aux_names)
[ "def", "_init_params", "(", "self", ",", "inputs", ",", "overwrite", "=", "False", ")", ":", "inputs", "=", "[", "x", "if", "isinstance", "(", "x", ",", "DataDesc", ")", "else", "DataDesc", "(", "*", "x", ")", "for", "x", "in", "inputs", "]", "input_shapes", "=", "{", "item", ".", "name", ":", "item", ".", "shape", "for", "item", "in", "inputs", "}", "arg_shapes", ",", "_", ",", "aux_shapes", "=", "self", ".", "symbol", ".", "infer_shape", "(", "*", "*", "input_shapes", ")", "assert", "arg_shapes", "is", "not", "None", "input_dtypes", "=", "{", "item", ".", "name", ":", "item", ".", "dtype", "for", "item", "in", "inputs", "}", "arg_dtypes", ",", "_", ",", "aux_dtypes", "=", "self", ".", "symbol", ".", "infer_type", "(", "*", "*", "input_dtypes", ")", "assert", "arg_dtypes", "is", "not", "None", "arg_names", "=", "self", ".", "symbol", ".", "list_arguments", "(", ")", "input_names", "=", "input_shapes", ".", "keys", "(", ")", "param_names", "=", "[", "key", "for", "key", "in", "arg_names", "if", "key", "not", "in", "input_names", "]", "aux_names", "=", "self", ".", "symbol", ".", "list_auxiliary_states", "(", ")", "param_name_attrs", "=", "[", "x", "for", "x", "in", "zip", "(", "arg_names", ",", "arg_shapes", ",", "arg_dtypes", ")", "if", "x", "[", "0", "]", "in", "param_names", "]", "arg_params", "=", "{", "k", ":", "nd", ".", "zeros", "(", "shape", "=", "s", ",", "dtype", "=", "t", ")", "for", "k", ",", "s", ",", "t", "in", "param_name_attrs", "}", "aux_name_attrs", "=", "[", "x", "for", "x", "in", "zip", "(", "aux_names", ",", "aux_shapes", ",", "aux_dtypes", ")", "if", "x", "[", "0", "]", "in", "aux_names", "]", "aux_params", "=", "{", "k", ":", "nd", ".", "zeros", "(", "shape", "=", "s", ",", "dtype", "=", "t", ")", "for", "k", ",", "s", ",", "t", "in", "aux_name_attrs", "}", "for", "k", ",", "v", "in", "arg_params", ".", "items", "(", ")", ":", "if", "self", ".", "arg_params", "and", "k", "in", "self", ".", "arg_params", "and", "(", "not", "overwrite", ")", ":", "arg_params", "[", "k", "]", "[", ":", "]", "=", "self", ".", "arg_params", "[", "k", "]", "[", ":", "]", "else", ":", "self", ".", "initializer", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "aux_params", ".", "items", "(", ")", ":", "if", "self", ".", "aux_params", "and", "k", "in", "self", ".", "aux_params", "and", "(", "not", "overwrite", ")", ":", "aux_params", "[", "k", "]", "[", ":", "]", "=", "self", ".", "aux_params", "[", "k", "]", "[", ":", "]", "else", ":", "self", ".", "initializer", "(", "k", ",", "v", ")", "self", ".", "arg_params", "=", "arg_params", "self", ".", "aux_params", "=", "aux_params", "return", "(", "arg_names", ",", "list", "(", "param_names", ")", ",", "aux_names", ")" ]
Initialize weight parameters and auxiliary states.
[ "Initialize", "weight", "parameters", "and", "auxiliary", "states", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L573-L611
24,153
apache/incubator-mxnet
python/mxnet/model.py
FeedForward._init_predictor
def _init_predictor(self, input_shapes, type_dict=None):
    """Initialize the predictor module for running prediction."""
    shapes = {name: self.arg_params[name].shape for name in self.arg_params}
    shapes.update(dict(input_shapes))
    if self._pred_exec is not None:
        arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
        assert arg_shapes is not None, "Incomplete input shapes"
        pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
        if arg_shapes == pred_shapes:
            return
    # for now only use the first device
    pred_exec = self.symbol.simple_bind(
        self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
    pred_exec.copy_params_from(self.arg_params, self.aux_params)

    _check_arguments(self.symbol)
    self._pred_exec = pred_exec
python
def _init_predictor(self, input_shapes, type_dict=None):
    """Initialize the predictor module for running prediction."""
    shapes = {name: self.arg_params[name].shape for name in self.arg_params}
    shapes.update(dict(input_shapes))
    if self._pred_exec is not None:
        arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
        assert arg_shapes is not None, "Incomplete input shapes"
        pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
        if arg_shapes == pred_shapes:
            return
    # for now only use the first device
    pred_exec = self.symbol.simple_bind(
        self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
    pred_exec.copy_params_from(self.arg_params, self.aux_params)

    _check_arguments(self.symbol)
    self._pred_exec = pred_exec
[ "def", "_init_predictor", "(", "self", ",", "input_shapes", ",", "type_dict", "=", "None", ")", ":", "shapes", "=", "{", "name", ":", "self", ".", "arg_params", "[", "name", "]", ".", "shape", "for", "name", "in", "self", ".", "arg_params", "}", "shapes", ".", "update", "(", "dict", "(", "input_shapes", ")", ")", "if", "self", ".", "_pred_exec", "is", "not", "None", ":", "arg_shapes", ",", "_", ",", "_", "=", "self", ".", "symbol", ".", "infer_shape", "(", "*", "*", "shapes", ")", "assert", "arg_shapes", "is", "not", "None", ",", "\"Incomplete input shapes\"", "pred_shapes", "=", "[", "x", ".", "shape", "for", "x", "in", "self", ".", "_pred_exec", ".", "arg_arrays", "]", "if", "arg_shapes", "==", "pred_shapes", ":", "return", "# for now only use the first device", "pred_exec", "=", "self", ".", "symbol", ".", "simple_bind", "(", "self", ".", "ctx", "[", "0", "]", ",", "grad_req", "=", "'null'", ",", "type_dict", "=", "type_dict", ",", "*", "*", "shapes", ")", "pred_exec", ".", "copy_params_from", "(", "self", ".", "arg_params", ",", "self", ".", "aux_params", ")", "_check_arguments", "(", "self", ".", "symbol", ")", "self", ".", "_pred_exec", "=", "pred_exec" ]
Initialize the predictor module for running prediction.
[ "Initialize", "the", "predictor", "module", "for", "running", "prediction", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L621-L637
24,154
apache/incubator-mxnet
python/mxnet/model.py
FeedForward._init_iter
def _init_iter(self, X, y, is_train):
    """Initialize the iterator given input."""
    if isinstance(X, (np.ndarray, nd.NDArray)):
        if y is None:
            if is_train:
                raise ValueError('y must be specified when X is numpy.ndarray')
            else:
                y = np.zeros(X.shape[0])
        if not isinstance(y, (np.ndarray, nd.NDArray)):
            raise TypeError('y must be ndarray when X is numpy.ndarray')
        if X.shape[0] != y.shape[0]:
            raise ValueError("The numbers of data points and labels not equal")
        if y.ndim == 2 and y.shape[1] == 1:
            y = y.flatten()
        if y.ndim != 1:
            raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
        if is_train:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                                  shuffle=is_train, last_batch_handle='roll_over')
        else:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                                  shuffle=False)
    if not isinstance(X, io.DataIter):
        raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
    return X
python
def _init_iter(self, X, y, is_train):
    """Initialize the iterator given input."""
    if isinstance(X, (np.ndarray, nd.NDArray)):
        if y is None:
            if is_train:
                raise ValueError('y must be specified when X is numpy.ndarray')
            else:
                y = np.zeros(X.shape[0])
        if not isinstance(y, (np.ndarray, nd.NDArray)):
            raise TypeError('y must be ndarray when X is numpy.ndarray')
        if X.shape[0] != y.shape[0]:
            raise ValueError("The numbers of data points and labels not equal")
        if y.ndim == 2 and y.shape[1] == 1:
            y = y.flatten()
        if y.ndim != 1:
            raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
        if is_train:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                                  shuffle=is_train, last_batch_handle='roll_over')
        else:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                                  shuffle=False)
    if not isinstance(X, io.DataIter):
        raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
    return X
[ "def", "_init_iter", "(", "self", ",", "X", ",", "y", ",", "is_train", ")", ":", "if", "isinstance", "(", "X", ",", "(", "np", ".", "ndarray", ",", "nd", ".", "NDArray", ")", ")", ":", "if", "y", "is", "None", ":", "if", "is_train", ":", "raise", "ValueError", "(", "'y must be specified when X is numpy.ndarray'", ")", "else", ":", "y", "=", "np", ".", "zeros", "(", "X", ".", "shape", "[", "0", "]", ")", "if", "not", "isinstance", "(", "y", ",", "(", "np", ".", "ndarray", ",", "nd", ".", "NDArray", ")", ")", ":", "raise", "TypeError", "(", "'y must be ndarray when X is numpy.ndarray'", ")", "if", "X", ".", "shape", "[", "0", "]", "!=", "y", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"The numbers of data points and labels not equal\"", ")", "if", "y", ".", "ndim", "==", "2", "and", "y", ".", "shape", "[", "1", "]", "==", "1", ":", "y", "=", "y", ".", "flatten", "(", ")", "if", "y", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Label must be 1D or 2D (with 2nd dimension being 1)\"", ")", "if", "is_train", ":", "return", "io", ".", "NDArrayIter", "(", "X", ",", "y", ",", "min", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "numpy_batch_size", ")", ",", "shuffle", "=", "is_train", ",", "last_batch_handle", "=", "'roll_over'", ")", "else", ":", "return", "io", ".", "NDArrayIter", "(", "X", ",", "y", ",", "min", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "numpy_batch_size", ")", ",", "shuffle", "=", "False", ")", "if", "not", "isinstance", "(", "X", ",", "io", ".", "DataIter", ")", ":", "raise", "TypeError", "(", "'X must be DataIter, NDArray or numpy.ndarray'", ")", "return", "X" ]
Initialize the iterator given input.
[ "Initialize", "the", "iterator", "given", "input", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L639-L662
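What the wrapper builds internally for numpy inputs, expressed with the public iterator (dummy data, illustrative batch size):

import numpy as np
import mxnet as mx

X = np.random.rand(100, 10).astype('float32')
y = np.random.randint(0, 2, size=(100,)).astype('float32')
train_iter = mx.io.NDArrayIter(X, y, batch_size=32, shuffle=True,
                               last_batch_handle='roll_over')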
24,155
apache/incubator-mxnet
python/mxnet/model.py
FeedForward._init_eval_iter
def _init_eval_iter(self, eval_data):
    """Initialize the iterator given eval_data."""
    if eval_data is None:
        return eval_data
    if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
        if eval_data[0] is not None:
            if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
                return eval_data[0]
            input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
                          else eval_data[0])
            input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
                           else eval_data[1])
            return self._init_iter(input_data, input_label, is_train=True)
        else:
            raise ValueError("Eval data is NONE")
    if not isinstance(eval_data, io.DataIter):
        raise TypeError('Eval data must be DataIter, or ' \
                        'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
    return eval_data
python
def _init_eval_iter(self, eval_data):
    """Initialize the iterator given eval_data."""
    if eval_data is None:
        return eval_data
    if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
        if eval_data[0] is not None:
            if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
                return eval_data[0]
            input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
                          else eval_data[0])
            input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
                           else eval_data[1])
            return self._init_iter(input_data, input_label, is_train=True)
        else:
            raise ValueError("Eval data is NONE")
    if not isinstance(eval_data, io.DataIter):
        raise TypeError('Eval data must be DataIter, or ' \
                        'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
    return eval_data
[ "def", "_init_eval_iter", "(", "self", ",", "eval_data", ")", ":", "if", "eval_data", "is", "None", ":", "return", "eval_data", "if", "isinstance", "(", "eval_data", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "eval_data", ")", "==", "2", ":", "if", "eval_data", "[", "0", "]", "is", "not", "None", ":", "if", "eval_data", "[", "1", "]", "is", "None", "and", "isinstance", "(", "eval_data", "[", "0", "]", ",", "io", ".", "DataIter", ")", ":", "return", "eval_data", "[", "0", "]", "input_data", "=", "(", "np", ".", "array", "(", "eval_data", "[", "0", "]", ")", "if", "isinstance", "(", "eval_data", "[", "0", "]", ",", "list", ")", "else", "eval_data", "[", "0", "]", ")", "input_label", "=", "(", "np", ".", "array", "(", "eval_data", "[", "1", "]", ")", "if", "isinstance", "(", "eval_data", "[", "1", "]", ",", "list", ")", "else", "eval_data", "[", "1", "]", ")", "return", "self", ".", "_init_iter", "(", "input_data", ",", "input_label", ",", "is_train", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "\"Eval data is NONE\"", ")", "if", "not", "isinstance", "(", "eval_data", ",", "io", ".", "DataIter", ")", ":", "raise", "TypeError", "(", "'Eval data must be DataIter, or '", "'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)'", ")", "return", "eval_data" ]
Initialize the iterator given eval_data.
[ "Initialize", "the", "iterator", "given", "eval_data", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L664-L682
24,156
apache/incubator-mxnet
python/mxnet/model.py
FeedForward.predict
def predict(self, X, num_batch=None, return_data=False, reset=True):
    """Run the prediction, always only use one device.

    Parameters
    ----------
    X : mxnet.DataIter
    num_batch : int or None
        The number of batches to run. Go through all batches if ``None``.

    Returns
    -------
    y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
        The predicted value of the output.
    """
    X = self._init_iter(X, None, is_train=False)

    if reset:
        X.reset()
    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    batch_size = X.batch_size
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
    output_list = [[] for _ in range(len(self._pred_exec.outputs))]
    if return_data:
        data_list = [[] for _ in X.provide_data]
        label_list = [[] for _ in X.provide_label]

    i = 0
    for batch in X:
        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        padded = batch.pad
        real_size = batch_size - padded

        for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
            o_list.append(o_nd[0:real_size].asnumpy())

        if return_data:
            for j, x in enumerate(batch.data):
                data_list[j].append(x[0:real_size].asnumpy())
            for j, x in enumerate(batch.label):
                label_list[j].append(x[0:real_size].asnumpy())
        i += 1
        if num_batch is not None and i == num_batch:
            break

    outputs = [np.concatenate(x) for x in output_list]
    if len(outputs) == 1:
        outputs = outputs[0]

    if return_data:
        data = [np.concatenate(x) for x in data_list]
        label = [np.concatenate(x) for x in label_list]

        if len(data) == 1:
            data = data[0]
        if len(label) == 1:
            label = label[0]
        return outputs, data, label
    else:
        return outputs
python
def predict(self, X, num_batch=None, return_data=False, reset=True):
    """Run the prediction, always only use one device.

    Parameters
    ----------
    X : mxnet.DataIter
    num_batch : int or None
        The number of batches to run. Go through all batches if ``None``.

    Returns
    -------
    y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
        The predicted value of the output.
    """
    X = self._init_iter(X, None, is_train=False)

    if reset:
        X.reset()
    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    batch_size = X.batch_size
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
    output_list = [[] for _ in range(len(self._pred_exec.outputs))]
    if return_data:
        data_list = [[] for _ in X.provide_data]
        label_list = [[] for _ in X.provide_label]

    i = 0
    for batch in X:
        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        padded = batch.pad
        real_size = batch_size - padded

        for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
            o_list.append(o_nd[0:real_size].asnumpy())

        if return_data:
            for j, x in enumerate(batch.data):
                data_list[j].append(x[0:real_size].asnumpy())
            for j, x in enumerate(batch.label):
                label_list[j].append(x[0:real_size].asnumpy())
        i += 1
        if num_batch is not None and i == num_batch:
            break

    outputs = [np.concatenate(x) for x in output_list]
    if len(outputs) == 1:
        outputs = outputs[0]

    if return_data:
        data = [np.concatenate(x) for x in data_list]
        label = [np.concatenate(x) for x in label_list]

        if len(data) == 1:
            data = data[0]
        if len(label) == 1:
            label = label[0]
        return outputs, data, label
    else:
        return outputs
[ "def", "predict", "(", "self", ",", "X", ",", "num_batch", "=", "None", ",", "return_data", "=", "False", ",", "reset", "=", "True", ")", ":", "X", "=", "self", ".", "_init_iter", "(", "X", ",", "None", ",", "is_train", "=", "False", ")", "if", "reset", ":", "X", ".", "reset", "(", ")", "data_shapes", "=", "X", ".", "provide_data", "data_names", "=", "[", "x", "[", "0", "]", "for", "x", "in", "data_shapes", "]", "type_dict", "=", "dict", "(", "(", "key", ",", "value", ".", "dtype", ")", "for", "(", "key", ",", "value", ")", "in", "self", ".", "arg_params", ".", "items", "(", ")", ")", "for", "x", "in", "X", ".", "provide_data", ":", "if", "isinstance", "(", "x", ",", "DataDesc", ")", ":", "type_dict", "[", "x", ".", "name", "]", "=", "x", ".", "dtype", "else", ":", "type_dict", "[", "x", "[", "0", "]", "]", "=", "mx_real_t", "self", ".", "_init_predictor", "(", "data_shapes", ",", "type_dict", ")", "batch_size", "=", "X", ".", "batch_size", "data_arrays", "=", "[", "self", ".", "_pred_exec", ".", "arg_dict", "[", "name", "]", "for", "name", "in", "data_names", "]", "output_list", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "self", ".", "_pred_exec", ".", "outputs", ")", ")", "]", "if", "return_data", ":", "data_list", "=", "[", "[", "]", "for", "_", "in", "X", ".", "provide_data", "]", "label_list", "=", "[", "[", "]", "for", "_", "in", "X", ".", "provide_label", "]", "i", "=", "0", "for", "batch", "in", "X", ":", "_load_data", "(", "batch", ",", "data_arrays", ")", "self", ".", "_pred_exec", ".", "forward", "(", "is_train", "=", "False", ")", "padded", "=", "batch", ".", "pad", "real_size", "=", "batch_size", "-", "padded", "for", "o_list", ",", "o_nd", "in", "zip", "(", "output_list", ",", "self", ".", "_pred_exec", ".", "outputs", ")", ":", "o_list", ".", "append", "(", "o_nd", "[", "0", ":", "real_size", "]", ".", "asnumpy", "(", ")", ")", "if", "return_data", ":", "for", "j", ",", "x", "in", "enumerate", "(", "batch", ".", "data", ")", ":", "data_list", "[", "j", "]", ".", "append", "(", "x", "[", "0", ":", "real_size", "]", ".", "asnumpy", "(", ")", ")", "for", "j", ",", "x", "in", "enumerate", "(", "batch", ".", "label", ")", ":", "label_list", "[", "j", "]", ".", "append", "(", "x", "[", "0", ":", "real_size", "]", ".", "asnumpy", "(", ")", ")", "i", "+=", "1", "if", "num_batch", "is", "not", "None", "and", "i", "==", "num_batch", ":", "break", "outputs", "=", "[", "np", ".", "concatenate", "(", "x", ")", "for", "x", "in", "output_list", "]", "if", "len", "(", "outputs", ")", "==", "1", ":", "outputs", "=", "outputs", "[", "0", "]", "if", "return_data", ":", "data", "=", "[", "np", ".", "concatenate", "(", "x", ")", "for", "x", "in", "data_list", "]", "label", "=", "[", "np", ".", "concatenate", "(", "x", ")", "for", "x", "in", "label_list", "]", "if", "len", "(", "data", ")", "==", "1", ":", "data", "=", "data", "[", "0", "]", "if", "len", "(", "label", ")", "==", "1", ":", "label", "=", "label", "[", "0", "]", "return", "outputs", ",", "data", ",", "label", "else", ":", "return", "outputs" ]
Run the prediction, always using only one device.

        Parameters
        ----------
        X : mxnet.DataIter
        num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.

        Returns
        -------
        y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
            The predicted value of the output.
[ "Run", "the", "prediction", "always", "only", "use", "one", "device", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L684-L751
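A minimal usage sketch for `predict` (illustrative, not from this record; `model` is assumed to be an already trained `mx.model.FeedForward` instance):

import mxnet as mx
import numpy as np

val_data = np.random.rand(100, 4).astype('float32')        # hypothetical input array
val_iter = mx.io.NDArrayIter(val_data, batch_size=25)
probs = model.predict(val_iter)                             # one ndarray, or a list for multi-output nets
outputs, inputs, labels = model.predict(val_iter, return_data=True)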
24,157
apache/incubator-mxnet
python/mxnet/model.py
FeedForward.score
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
    """Run the model given an input and calculate the score
    as assessed by an evaluation metric.

    Parameters
    ----------
    X : mxnet.DataIter
    eval_metric : metric.metric
        The metric for calculating score.
    num_batch : int or None
        The number of batches to run. Go through all batches if ``None``.

    Returns
    -------
    s : float
        The final score.
    """
    # setup metric
    if not isinstance(eval_metric, metric.EvalMetric):
        eval_metric = metric.create(eval_metric)

    X = self._init_iter(X, None, is_train=False)
    if reset:
        X.reset()

    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]

    for i, batch in enumerate(X):
        if num_batch is not None and i == num_batch:
            break
        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        eval_metric.update(batch.label, self._pred_exec.outputs)

        if batch_end_callback is not None:
            batch_end_params = BatchEndParam(epoch=0,
                                             nbatch=i,
                                             eval_metric=eval_metric,
                                             locals=locals())
            _multiple_callbacks(batch_end_callback, batch_end_params)
    return eval_metric.get()[1]
python
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
    """Run the model given an input and calculate the score
    as assessed by an evaluation metric.

    Parameters
    ----------
    X : mxnet.DataIter
    eval_metric : metric.metric
        The metric for calculating score.
    num_batch : int or None
        The number of batches to run. Go through all batches if ``None``.

    Returns
    -------
    s : float
        The final score.
    """
    # setup metric
    if not isinstance(eval_metric, metric.EvalMetric):
        eval_metric = metric.create(eval_metric)

    X = self._init_iter(X, None, is_train=False)
    if reset:
        X.reset()

    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]

    for i, batch in enumerate(X):
        if num_batch is not None and i == num_batch:
            break
        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        eval_metric.update(batch.label, self._pred_exec.outputs)

        if batch_end_callback is not None:
            batch_end_params = BatchEndParam(epoch=0,
                                             nbatch=i,
                                             eval_metric=eval_metric,
                                             locals=locals())
            _multiple_callbacks(batch_end_callback, batch_end_params)
    return eval_metric.get()[1]
[ "def", "score", "(", "self", ",", "X", ",", "eval_metric", "=", "'acc'", ",", "num_batch", "=", "None", ",", "batch_end_callback", "=", "None", ",", "reset", "=", "True", ")", ":", "# setup metric", "if", "not", "isinstance", "(", "eval_metric", ",", "metric", ".", "EvalMetric", ")", ":", "eval_metric", "=", "metric", ".", "create", "(", "eval_metric", ")", "X", "=", "self", ".", "_init_iter", "(", "X", ",", "None", ",", "is_train", "=", "False", ")", "if", "reset", ":", "X", ".", "reset", "(", ")", "data_shapes", "=", "X", ".", "provide_data", "data_names", "=", "[", "x", "[", "0", "]", "for", "x", "in", "data_shapes", "]", "type_dict", "=", "dict", "(", "(", "key", ",", "value", ".", "dtype", ")", "for", "(", "key", ",", "value", ")", "in", "self", ".", "arg_params", ".", "items", "(", ")", ")", "for", "x", "in", "X", ".", "provide_data", ":", "if", "isinstance", "(", "x", ",", "DataDesc", ")", ":", "type_dict", "[", "x", ".", "name", "]", "=", "x", ".", "dtype", "else", ":", "type_dict", "[", "x", "[", "0", "]", "]", "=", "mx_real_t", "self", ".", "_init_predictor", "(", "data_shapes", ",", "type_dict", ")", "data_arrays", "=", "[", "self", ".", "_pred_exec", ".", "arg_dict", "[", "name", "]", "for", "name", "in", "data_names", "]", "for", "i", ",", "batch", "in", "enumerate", "(", "X", ")", ":", "if", "num_batch", "is", "not", "None", "and", "i", "==", "num_batch", ":", "break", "_load_data", "(", "batch", ",", "data_arrays", ")", "self", ".", "_pred_exec", ".", "forward", "(", "is_train", "=", "False", ")", "eval_metric", ".", "update", "(", "batch", ".", "label", ",", "self", ".", "_pred_exec", ".", "outputs", ")", "if", "batch_end_callback", "is", "not", "None", ":", "batch_end_params", "=", "BatchEndParam", "(", "epoch", "=", "0", ",", "nbatch", "=", "i", ",", "eval_metric", "=", "eval_metric", ",", "locals", "=", "locals", "(", ")", ")", "_multiple_callbacks", "(", "batch_end_callback", ",", "batch_end_params", ")", "return", "eval_metric", ".", "get", "(", ")", "[", "1", "]" ]
Run the model given an input and calculate the score
        as assessed by an evaluation metric.

        Parameters
        ----------
        X : mxnet.DataIter
        eval_metric : metric.metric
            The metric for calculating score.
        num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.

        Returns
        -------
        s : float
            The final score.
[ "Run", "the", "model", "given", "an", "input", "and", "calculate", "the", "score", "as", "assessed", "by", "an", "evaluation", "metric", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L753-L802
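A short, hedged example of `score` (assumes the same trained `model` plus hypothetical `val_data`/`val_label` arrays):

val_iter = mx.io.NDArrayIter(val_data, label=val_label, batch_size=25)
acc = model.score(val_iter)                                 # default metric is 'acc'
ce = model.score(val_iter, eval_metric=mx.metric.CrossEntropy())
print('accuracy=%f cross-entropy=%f' % (acc, ce))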
24,158
apache/incubator-mxnet
python/mxnet/model.py
FeedForward.create
def create(symbol, X, y=None, ctx=None,
           num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
           eval_data=None, eval_metric='acc',
           epoch_end_callback=None, batch_end_callback=None,
           kvstore='local', logger=None, work_load_list=None,
           eval_end_callback=LogValidationMetricsCallback(),
           eval_batch_end_callback=None, **kwargs):
    """Functional style to create a model.
    This function is more consistent with functional
    languages such as R, where mutation is not allowed.

    Parameters
    ----------
    symbol : Symbol
        The symbol configuration of a computation network.
    X : DataIter
        Training data.
    y : numpy.ndarray, optional
        If `X` is a ``numpy.ndarray``, `y` must be set.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
        To use multi-GPU training, pass in a list of GPU contexts.
    num_epoch : int, optional
        The number of training epochs.
    epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
        ``ceil(num_train_examples / batch_size)``.
    optimizer : str or Optimizer, optional
        The name of the chosen optimizer, or an optimizer object, used for training.
    initializer : initializer function, optional
        The initialization scheme used.
    eval_data : DataIter or numpy.ndarray pair
        If `eval_data` is a ``numpy.ndarray`` pair, it should
        be (`valid_data`, `valid_label`).
    eval_metric : metric.EvalMetric or str or callable
        The evaluation metric. Can be the name of an evaluation metric
        or a custom evaluation function that returns statistics
        based on a minibatch.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback: callable(epoch)
        A callback that is invoked at end of each batch for print purposes.
    kvstore: KVStore or str, optional
        The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
        Defaults to 'local'; often no need to change for a single machine.
    logger : logging logger, optional
        When not specified, default logger will be used.
    work_load_list : list of float or int, optional
        The list of work load for different devices, in the same order as `ctx`.
    """
    model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
                        epoch_size=epoch_size,
                        optimizer=optimizer, initializer=initializer, **kwargs)
    model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
              epoch_end_callback=epoch_end_callback,
              batch_end_callback=batch_end_callback,
              kvstore=kvstore,
              logger=logger,
              work_load_list=work_load_list,
              eval_end_callback=eval_end_callback,
              eval_batch_end_callback=eval_batch_end_callback)
    return model
python
def create(symbol, X, y=None, ctx=None,
           num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
           eval_data=None, eval_metric='acc',
           epoch_end_callback=None, batch_end_callback=None,
           kvstore='local', logger=None, work_load_list=None,
           eval_end_callback=LogValidationMetricsCallback(),
           eval_batch_end_callback=None, **kwargs):
    """Functional style to create a model.
    This function is more consistent with functional
    languages such as R, where mutation is not allowed.

    Parameters
    ----------
    symbol : Symbol
        The symbol configuration of a computation network.
    X : DataIter
        Training data.
    y : numpy.ndarray, optional
        If `X` is a ``numpy.ndarray``, `y` must be set.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
        To use multi-GPU training, pass in a list of GPU contexts.
    num_epoch : int, optional
        The number of training epochs.
    epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
        ``ceil(num_train_examples / batch_size)``.
    optimizer : str or Optimizer, optional
        The name of the chosen optimizer, or an optimizer object, used for training.
    initializer : initializer function, optional
        The initialization scheme used.
    eval_data : DataIter or numpy.ndarray pair
        If `eval_data` is a ``numpy.ndarray`` pair, it should
        be (`valid_data`, `valid_label`).
    eval_metric : metric.EvalMetric or str or callable
        The evaluation metric. Can be the name of an evaluation metric
        or a custom evaluation function that returns statistics
        based on a minibatch.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback: callable(epoch)
        A callback that is invoked at end of each batch for print purposes.
    kvstore: KVStore or str, optional
        The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
        Defaults to 'local'; often no need to change for a single machine.
    logger : logging logger, optional
        When not specified, default logger will be used.
    work_load_list : list of float or int, optional
        The list of work load for different devices, in the same order as `ctx`.
    """
    model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
                        epoch_size=epoch_size,
                        optimizer=optimizer, initializer=initializer, **kwargs)
    model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
              epoch_end_callback=epoch_end_callback,
              batch_end_callback=batch_end_callback,
              kvstore=kvstore,
              logger=logger,
              work_load_list=work_load_list,
              eval_end_callback=eval_end_callback,
              eval_batch_end_callback=eval_batch_end_callback)
    return model
[ "def", "create", "(", "symbol", ",", "X", ",", "y", "=", "None", ",", "ctx", "=", "None", ",", "num_epoch", "=", "None", ",", "epoch_size", "=", "None", ",", "optimizer", "=", "'sgd'", ",", "initializer", "=", "Uniform", "(", "0.01", ")", ",", "eval_data", "=", "None", ",", "eval_metric", "=", "'acc'", ",", "epoch_end_callback", "=", "None", ",", "batch_end_callback", "=", "None", ",", "kvstore", "=", "'local'", ",", "logger", "=", "None", ",", "work_load_list", "=", "None", ",", "eval_end_callback", "=", "LogValidationMetricsCallback", "(", ")", ",", "eval_batch_end_callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "model", "=", "FeedForward", "(", "symbol", ",", "ctx", "=", "ctx", ",", "num_epoch", "=", "num_epoch", ",", "epoch_size", "=", "epoch_size", ",", "optimizer", "=", "optimizer", ",", "initializer", "=", "initializer", ",", "*", "*", "kwargs", ")", "model", ".", "fit", "(", "X", ",", "y", ",", "eval_data", "=", "eval_data", ",", "eval_metric", "=", "eval_metric", ",", "epoch_end_callback", "=", "epoch_end_callback", ",", "batch_end_callback", "=", "batch_end_callback", ",", "kvstore", "=", "kvstore", ",", "logger", "=", "logger", ",", "work_load_list", "=", "work_load_list", ",", "eval_end_callback", "=", "eval_end_callback", ",", "eval_batch_end_callback", "=", "eval_batch_end_callback", ")", "return", "model" ]
Functional style to create a model.
        This function is more consistent with functional
        languages such as R, where mutation is not allowed.

        Parameters
        ----------
        symbol : Symbol
            The symbol configuration of a computation network.
        X : DataIter
            Training data.
        y : numpy.ndarray, optional
            If `X` is a ``numpy.ndarray``, `y` must be set.
        ctx : Context or list of Context, optional
            The device context of training and prediction.
            To use multi-GPU training, pass in a list of GPU contexts.
        num_epoch : int, optional
            The number of training epochs.
        epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
            ``ceil(num_train_examples / batch_size)``.
        optimizer : str or Optimizer, optional
            The name of the chosen optimizer, or an optimizer object, used for training.
        initializer : initializer function, optional
            The initialization scheme used.
        eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
            be (`valid_data`, `valid_label`).
        eval_metric : metric.EvalMetric or str or callable
            The evaluation metric. Can be the name of an evaluation metric
            or a custom evaluation function that returns statistics
            based on a minibatch.
        epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
            A callback that is invoked at end of each epoch.
            This can be used to checkpoint model each epoch.
        batch_end_callback: callable(epoch)
            A callback that is invoked at end of each batch for print purposes.
        kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
            Defaults to 'local'; often no need to change for a single machine.
        logger : logging logger, optional
            When not specified, default logger will be used.
        work_load_list : list of float or int, optional
            The list of work load for different devices, in the same order as `ctx`.
[ "Functional", "style", "to", "create", "a", "model", ".", "This", "function", "is", "more", "consistent", "with", "functional", "languages", "such", "as", "R", "where", "mutation", "is", "not", "allowed", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L962-L1025
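A sketch of the one-call training style this enables (the network and `train_iter` are placeholders):

net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(net, name='fc', num_hidden=10)
net = mx.sym.SoftmaxOutput(net, name='softmax')
model = mx.model.FeedForward.create(symbol=net, X=train_iter,   # hypothetical DataIter
                                    num_epoch=10, ctx=mx.cpu(),
                                    learning_rate=0.1)           # forwarded to the optimizer via **kwargs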
24,159
apache/incubator-mxnet
example/cnn_chinese_text_classification/data_helpers.py
get_chinese_text
def get_chinese_text():
    """Download the chinese_text dataset and unzip it"""
    if not os.path.isdir("data/"):
        os.system("mkdir data/")
    if (not os.path.exists('data/pos.txt')) or \
            (not os.path.exists('data/neg.txt')):
        os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip "
                  "-P data/")
        os.chdir("./data")
        os.system("unzip -u chinese_text.zip")
        os.chdir("..")
python
def get_chinese_text():
    """Download the chinese_text dataset and unzip it"""
    if not os.path.isdir("data/"):
        os.system("mkdir data/")
    if (not os.path.exists('data/pos.txt')) or \
            (not os.path.exists('data/neg.txt')):
        os.system("wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip "
                  "-P data/")
        os.chdir("./data")
        os.system("unzip -u chinese_text.zip")
        os.chdir("..")
[ "def", "get_chinese_text", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "\"data/\"", ")", ":", "os", ".", "system", "(", "\"mkdir data/\"", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "'data/pos.txt'", ")", ")", "or", "(", "not", "os", ".", "path", ".", "exists", "(", "'data/neg'", ")", ")", ":", "os", ".", "system", "(", "\"wget -q https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/example/chinese_text.zip \"", "\"-P data/\"", ")", "os", ".", "chdir", "(", "\"./data\"", ")", "os", ".", "system", "(", "\"unzip -u chinese_text.zip\"", ")", "os", ".", "chdir", "(", "\"..\"", ")" ]
Download the chinese_text dataset and unzip it
[ "Download", "the", "chinese_text", "dataset", "and", "unzip", "it" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_chinese_text_classification/data_helpers.py#L51-L61
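Usage is a single call, but note the function shells out, so `wget` and `unzip` must be on PATH (an environment assumption the code does not check):

get_chinese_text()
# afterwards data/pos.txt and data/neg.txt hold the positive/negative sentences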
24,160
apache/incubator-mxnet
example/ssd/train/metric.py
MultiBoxMetric.update
def update(self, labels, preds):
    """
    Implementation of updating metrics
    """
    # get generated multi label from network
    cls_prob = preds[0].asnumpy()
    loc_loss = preds[1].asnumpy()
    cls_label = preds[2].asnumpy()
    valid_count = np.sum(cls_label >= 0)
    # cross-entropy of the predicted probability for each valid (non-ignored) class label
    label = cls_label.flatten()
    mask = np.where(label >= 0)[0]
    indices = np.int64(label[mask])
    prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
    prob = prob[mask, indices]
    self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
    self.num_inst[0] += valid_count
    # smoothl1loss
    self.sum_metric[1] += np.sum(loc_loss)
    self.num_inst[1] += valid_count
python
def update(self, labels, preds):
    """
    Implementation of updating metrics
    """
    # get generated multi label from network
    cls_prob = preds[0].asnumpy()
    loc_loss = preds[1].asnumpy()
    cls_label = preds[2].asnumpy()
    valid_count = np.sum(cls_label >= 0)
    # cross-entropy of the predicted probability for each valid (non-ignored) class label
    label = cls_label.flatten()
    mask = np.where(label >= 0)[0]
    indices = np.int64(label[mask])
    prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
    prob = prob[mask, indices]
    self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
    self.num_inst[0] += valid_count
    # smoothl1loss
    self.sum_metric[1] += np.sum(loc_loss)
    self.num_inst[1] += valid_count
[ "def", "update", "(", "self", ",", "labels", ",", "preds", ")", ":", "# get generated multi label from network", "cls_prob", "=", "preds", "[", "0", "]", ".", "asnumpy", "(", ")", "loc_loss", "=", "preds", "[", "1", "]", ".", "asnumpy", "(", ")", "cls_label", "=", "preds", "[", "2", "]", ".", "asnumpy", "(", ")", "valid_count", "=", "np", ".", "sum", "(", "cls_label", ">=", "0", ")", "# overall accuracy & object accuracy", "label", "=", "cls_label", ".", "flatten", "(", ")", "mask", "=", "np", ".", "where", "(", "label", ">=", "0", ")", "[", "0", "]", "indices", "=", "np", ".", "int64", "(", "label", "[", "mask", "]", ")", "prob", "=", "cls_prob", ".", "transpose", "(", "(", "0", ",", "2", ",", "1", ")", ")", ".", "reshape", "(", "(", "-", "1", ",", "cls_prob", ".", "shape", "[", "1", "]", ")", ")", "prob", "=", "prob", "[", "mask", ",", "indices", "]", "self", ".", "sum_metric", "[", "0", "]", "+=", "(", "-", "np", ".", "log", "(", "prob", "+", "self", ".", "eps", ")", ")", ".", "sum", "(", ")", "self", ".", "num_inst", "[", "0", "]", "+=", "valid_count", "# smoothl1loss", "self", ".", "sum_metric", "[", "1", "]", "+=", "np", ".", "sum", "(", "loc_loss", ")", "self", ".", "num_inst", "[", "1", "]", "+=", "valid_count" ]
Implementation of updating metrics
[ "Implementation", "of", "updating", "metrics" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L53-L72
24,161
apache/incubator-mxnet
example/ssd/train/metric.py
MultiBoxMetric.get
def get(self): """Get the current evaluation result. Override the default behavior Returns ------- name : str Name of the metric. value : float Value of the evaluation. """ if self.num is None: if self.num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.sum_metric / self.num_inst) else: names = ['%s'%(self.name[i]) for i in range(self.num)] values = [x / y if y != 0 else float('nan') \ for x, y in zip(self.sum_metric, self.num_inst)] return (names, values)
python
def get(self): """Get the current evaluation result. Override the default behavior Returns ------- name : str Name of the metric. value : float Value of the evaluation. """ if self.num is None: if self.num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.sum_metric / self.num_inst) else: names = ['%s'%(self.name[i]) for i in range(self.num)] values = [x / y if y != 0 else float('nan') \ for x, y in zip(self.sum_metric, self.num_inst)] return (names, values)
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "num", "is", "None", ":", "if", "self", ".", "num_inst", "==", "0", ":", "return", "(", "self", ".", "name", ",", "float", "(", "'nan'", ")", ")", "else", ":", "return", "(", "self", ".", "name", ",", "self", ".", "sum_metric", "/", "self", ".", "num_inst", ")", "else", ":", "names", "=", "[", "'%s'", "%", "(", "self", ".", "name", "[", "i", "]", ")", "for", "i", "in", "range", "(", "self", ".", "num", ")", "]", "values", "=", "[", "x", "/", "y", "if", "y", "!=", "0", "else", "float", "(", "'nan'", ")", "for", "x", ",", "y", "in", "zip", "(", "self", ".", "sum_metric", ",", "self", ".", "num_inst", ")", "]", "return", "(", "names", ",", "values", ")" ]
Get the current evaluation result. Override the default behavior Returns ------- name : str Name of the metric. value : float Value of the evaluation.
[ "Get", "the", "current", "evaluation", "result", ".", "Override", "the", "default", "behavior" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/metric.py#L74-L94
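A hedged sketch of how this metric is consumed during SSD training (module and iterator are placeholders, and the zero-argument constructor is assumed from the class's use of `self.num`, `self.eps` and default names):

metric = MultiBoxMetric()
mod.fit(train_iter, eval_metric=metric, num_epoch=1)   # hypothetical mx.mod.Module
names, values = metric.get()                           # e.g. (['CrossEntropy', 'SmoothL1'], [ce, l1])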
24,162
apache/incubator-mxnet
python/mxnet/executor.py
Executor._get_dict
def _get_dict(names, ndarrays): """Get the dictionary given name and ndarray pairs.""" nset = set() for nm in names: if nm in nset: raise ValueError('Duplicate names detected, %s' % str(names)) nset.add(nm) return dict(zip(names, ndarrays))
python
def _get_dict(names, ndarrays): """Get the dictionary given name and ndarray pairs.""" nset = set() for nm in names: if nm in nset: raise ValueError('Duplicate names detected, %s' % str(names)) nset.add(nm) return dict(zip(names, ndarrays))
[ "def", "_get_dict", "(", "names", ",", "ndarrays", ")", ":", "nset", "=", "set", "(", ")", "for", "nm", "in", "names", ":", "if", "nm", "in", "nset", ":", "raise", "ValueError", "(", "'Duplicate names detected, %s'", "%", "str", "(", "names", ")", ")", "nset", ".", "add", "(", "nm", ")", "return", "dict", "(", "zip", "(", "names", ",", "ndarrays", ")", ")" ]
Get the dictionary given name and ndarray pairs.
[ "Get", "the", "dictionary", "given", "name", "and", "ndarray", "pairs", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L90-L97
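A tiny illustration of the helper; it is a static method, so it can be exercised without building an executor:

from mxnet import nd
from mxnet.executor import Executor

d = Executor._get_dict(['a', 'b'], [nd.zeros((2,)), nd.ones((2,))])
print(d['b'].asnumpy())   # [ 1.  1.]
# Executor._get_dict(['a', 'a'], ...) would raise ValueError on the duplicate name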
24,163
apache/incubator-mxnet
python/mxnet/executor.py
Executor._get_outputs
def _get_outputs(self): """List all the output NDArray. Returns ------- A list of ndarray bound to the heads of executor. """ out_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() check_call(_LIB.MXExecutorOutputs(self.handle, ctypes.byref(out_size), ctypes.byref(handles))) num_output = out_size.value outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)] return outputs
python
def _get_outputs(self): """List all the output NDArray. Returns ------- A list of ndarray bound to the heads of executor. """ out_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() check_call(_LIB.MXExecutorOutputs(self.handle, ctypes.byref(out_size), ctypes.byref(handles))) num_output = out_size.value outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)] return outputs
[ "def", "_get_outputs", "(", "self", ")", ":", "out_size", "=", "mx_uint", "(", ")", "handles", "=", "ctypes", ".", "POINTER", "(", "NDArrayHandle", ")", "(", ")", "check_call", "(", "_LIB", ".", "MXExecutorOutputs", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "out_size", ")", ",", "ctypes", ".", "byref", "(", "handles", ")", ")", ")", "num_output", "=", "out_size", ".", "value", "outputs", "=", "[", "_ndarray_cls", "(", "NDArrayHandle", "(", "handles", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "num_output", ")", "]", "return", "outputs" ]
List all the output NDArray. Returns ------- A list of ndarray bound to the heads of executor.
[ "List", "all", "the", "output", "NDArray", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L99-L112
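This call backs the executor's public `outputs` list, so typical code never invokes it directly; a minimal sketch:

a = mx.sym.Variable('a')
texec = (2 * a).bind(mx.cpu(), {'a': mx.nd.array([1, 2])})
texec.forward()
print(texec.outputs[0].asnumpy())   # [ 2.  4.]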
24,164
apache/incubator-mxnet
python/mxnet/executor.py
Executor.forward
def forward(self, is_train=False, **kwargs):
    """Calculate the outputs specified by the bound symbol.

    Parameters
    ----------
    is_train: bool, optional
        Whether this forward pass is for training. If True,
        a backward call is expected to follow.

    **kwargs
        Additional specification of input arguments.

    Examples
    --------
    >>> # doing forward by specifying data
    >>> texec.forward(is_train=True, data=mydata)
    >>> # doing forward by not specifying things, but copying to the executor beforehand
    >>> mydata.copyto(texec.arg_dict['data'])
    >>> texec.forward(is_train=True)
    >>> # doing forward by specifying data and get outputs
    >>> outputs = texec.forward(is_train=True, data=mydata)
    >>> print(outputs[0].asnumpy())
    """
    if len(kwargs) != 0:
        arg_dict = self.arg_dict
        for name, array in kwargs.items():
            if not isinstance(array, (NDArray, np.ndarray)):
                raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
            if name not in arg_dict:
                raise TypeError('Unknown argument %s' % name)
            if arg_dict[name].shape != array.shape:
                raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
                                 %(name, str(arg_dict[name].shape), str(array.shape)))
            arg_dict[name][:] = array

    check_call(_LIB.MXExecutorForward(
        self.handle,
        ctypes.c_int(int(is_train))))

    return self.outputs
python
def forward(self, is_train=False, **kwargs):
    """Calculate the outputs specified by the bound symbol.

    Parameters
    ----------
    is_train: bool, optional
        Whether this forward pass is for training. If True,
        a backward call is expected to follow.

    **kwargs
        Additional specification of input arguments.

    Examples
    --------
    >>> # doing forward by specifying data
    >>> texec.forward(is_train=True, data=mydata)
    >>> # doing forward by not specifying things, but copying to the executor beforehand
    >>> mydata.copyto(texec.arg_dict['data'])
    >>> texec.forward(is_train=True)
    >>> # doing forward by specifying data and get outputs
    >>> outputs = texec.forward(is_train=True, data=mydata)
    >>> print(outputs[0].asnumpy())
    """
    if len(kwargs) != 0:
        arg_dict = self.arg_dict
        for name, array in kwargs.items():
            if not isinstance(array, (NDArray, np.ndarray)):
                raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
            if name not in arg_dict:
                raise TypeError('Unknown argument %s' % name)
            if arg_dict[name].shape != array.shape:
                raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
                                 %(name, str(arg_dict[name].shape), str(array.shape)))
            arg_dict[name][:] = array

    check_call(_LIB.MXExecutorForward(
        self.handle,
        ctypes.c_int(int(is_train))))

    return self.outputs
[ "def", "forward", "(", "self", ",", "is_train", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "kwargs", ")", "!=", "0", ":", "arg_dict", "=", "self", ".", "arg_dict", "for", "name", ",", "array", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "array", ",", "(", "NDArray", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "ValueError", "(", "'only accept keyword argument of NDArrays and numpy.ndarray'", ")", "if", "name", "not", "in", "arg_dict", ":", "raise", "TypeError", "(", "'Unknown argument %s'", "%", "name", ")", "if", "arg_dict", "[", "name", "]", ".", "shape", "!=", "array", ".", "shape", ":", "raise", "ValueError", "(", "'Shape not match! Argument %s, need: %s, received: %s'", "%", "(", "name", ",", "str", "(", "arg_dict", "[", "name", "]", ".", "shape", ")", ",", "str", "(", "array", ".", "shape", ")", ")", ")", "arg_dict", "[", "name", "]", "[", ":", "]", "=", "array", "check_call", "(", "_LIB", ".", "MXExecutorForward", "(", "self", ".", "handle", ",", "ctypes", ".", "c_int", "(", "int", "(", "is_train", ")", ")", ")", ")", "return", "self", ".", "outputs" ]
Calculate the outputs specified by the bound symbol.

        Parameters
        ----------
        is_train: bool, optional
            Whether this forward pass is for training. If True,
            a backward call is expected to follow.

        **kwargs
            Additional specification of input arguments.

        Examples
        --------
        >>> # doing forward by specifying data
        >>> texec.forward(is_train=True, data=mydata)
        >>> # doing forward by not specifying things, but copying to the executor beforehand
        >>> mydata.copyto(texec.arg_dict['data'])
        >>> texec.forward(is_train=True)
        >>> # doing forward by specifying data and get outputs
        >>> outputs = texec.forward(is_train=True, data=mydata)
        >>> print(outputs[0].asnumpy())
[ "Calculate", "the", "outputs", "specified", "by", "the", "bound", "symbol", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L114-L153
24,165
apache/incubator-mxnet
python/mxnet/executor.py
Executor.backward
def backward(self, out_grads=None, is_train=True):
    """Do backward pass to get the gradient of arguments.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
        Gradient on the outputs to be propagated back.
        This parameter is only needed when bind is called
        on outputs that are not a loss function.

    is_train : bool, default True
        Whether this backward is for training or inference. Note that in rare
        cases you want to call backward with is_train=False to get gradient
        during inference.

    Examples
    --------
    >>> # Example for binding on loss function symbol, which gives the loss value of the model.
    >>> # Equivalently it gives the head gradient for backward pass.
    >>> # In this example the built-in SoftmaxOutput is used as loss function.
    >>> # MakeLoss can be used to define customized loss function symbol.
    >>> net = mx.sym.Variable('data')
    >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
    >>> net = mx.sym.Activation(net, name='relu', act_type="relu")
    >>> net = mx.sym.SoftmaxOutput(net, name='softmax')
    >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
    >>>         'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
    >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
    >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [[ 0.00378404  0.07600445  0.07600445  0.07600445  0.20660152  0.5616011 ]]
    >>> texec.backward()
    >>> print(texec.grad_arrays[1].asnumpy())
    [[ 0.00378404  0.00378404  0.00378404  0.00378404]
     [-0.92399555 -0.92399555 -0.92399555 -0.92399555]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.20660152  0.20660152  0.20660152  0.20660152]
     [ 0.5616011   0.5616011   0.5616011   0.5616011 ]]
    >>>
    >>> # Example for binding on non-loss function symbol.
    >>> # Here the binding symbol is neither built-in loss function
    >>> # nor customized loss created by MakeLoss.
    >>> # As a result the head gradient is not automatically provided.
    >>> a = mx.sym.Variable('a')
    >>> b = mx.sym.Variable('b')
    >>> # c is not a loss function symbol
    >>> c = 2 * a + b
    >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
    >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
    >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [ 4.  7.]
    >>> # out_grads is the head gradient in backward pass.
    >>> # Here we define 'c' as loss function.
    >>> # Then 'out' is passed as head gradient of backward pass.
    >>> texec.backward(out)
    >>> print(texec.grad_arrays[0].asnumpy())
    [ 8.  14.]
    >>> print(texec.grad_arrays[1].asnumpy())
    [ 4.  7.]
    """
    if out_grads is None:
        out_grads = []
    elif isinstance(out_grads, NDArray):
        out_grads = [out_grads]
    elif isinstance(out_grads, dict):
        out_grads = [out_grads[k] for k in self._symbol.list_outputs()]

    for obj in out_grads:
        if not isinstance(obj, NDArray):
            raise TypeError("inputs must be NDArray")
    ndarray = c_handle_array(out_grads)
    check_call(_LIB.MXExecutorBackwardEx(
        self.handle,
        mx_uint(len(out_grads)),
        ndarray,
        ctypes.c_int(is_train)))
python
def backward(self, out_grads=None, is_train=True):
    """Do backward pass to get the gradient of arguments.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
        Gradient on the outputs to be propagated back.
        This parameter is only needed when bind is called
        on outputs that are not a loss function.

    is_train : bool, default True
        Whether this backward is for training or inference. Note that in rare
        cases you want to call backward with is_train=False to get gradient
        during inference.

    Examples
    --------
    >>> # Example for binding on loss function symbol, which gives the loss value of the model.
    >>> # Equivalently it gives the head gradient for backward pass.
    >>> # In this example the built-in SoftmaxOutput is used as loss function.
    >>> # MakeLoss can be used to define customized loss function symbol.
    >>> net = mx.sym.Variable('data')
    >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
    >>> net = mx.sym.Activation(net, name='relu', act_type="relu")
    >>> net = mx.sym.SoftmaxOutput(net, name='softmax')
    >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
    >>>         'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
    >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
    >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [[ 0.00378404  0.07600445  0.07600445  0.07600445  0.20660152  0.5616011 ]]
    >>> texec.backward()
    >>> print(texec.grad_arrays[1].asnumpy())
    [[ 0.00378404  0.00378404  0.00378404  0.00378404]
     [-0.92399555 -0.92399555 -0.92399555 -0.92399555]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.07600445  0.07600445  0.07600445  0.07600445]
     [ 0.20660152  0.20660152  0.20660152  0.20660152]
     [ 0.5616011   0.5616011   0.5616011   0.5616011 ]]
    >>>
    >>> # Example for binding on non-loss function symbol.
    >>> # Here the binding symbol is neither built-in loss function
    >>> # nor customized loss created by MakeLoss.
    >>> # As a result the head gradient is not automatically provided.
    >>> a = mx.sym.Variable('a')
    >>> b = mx.sym.Variable('b')
    >>> # c is not a loss function symbol
    >>> c = 2 * a + b
    >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
    >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
    >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
    >>> out = texec.forward(is_train=True)[0].copy()
    >>> print(out.asnumpy())
    [ 4.  7.]
    >>> # out_grads is the head gradient in backward pass.
    >>> # Here we define 'c' as loss function.
    >>> # Then 'out' is passed as head gradient of backward pass.
    >>> texec.backward(out)
    >>> print(texec.grad_arrays[0].asnumpy())
    [ 8.  14.]
    >>> print(texec.grad_arrays[1].asnumpy())
    [ 4.  7.]
    """
    if out_grads is None:
        out_grads = []
    elif isinstance(out_grads, NDArray):
        out_grads = [out_grads]
    elif isinstance(out_grads, dict):
        out_grads = [out_grads[k] for k in self._symbol.list_outputs()]

    for obj in out_grads:
        if not isinstance(obj, NDArray):
            raise TypeError("inputs must be NDArray")
    ndarray = c_handle_array(out_grads)
    check_call(_LIB.MXExecutorBackwardEx(
        self.handle,
        mx_uint(len(out_grads)),
        ndarray,
        ctypes.c_int(is_train)))
[ "def", "backward", "(", "self", ",", "out_grads", "=", "None", ",", "is_train", "=", "True", ")", ":", "if", "out_grads", "is", "None", ":", "out_grads", "=", "[", "]", "elif", "isinstance", "(", "out_grads", ",", "NDArray", ")", ":", "out_grads", "=", "[", "out_grads", "]", "elif", "isinstance", "(", "out_grads", ",", "dict", ")", ":", "out_grads", "=", "[", "out_grads", "[", "k", "]", "for", "k", "in", "self", ".", "_symbol", ".", "list_outputs", "(", ")", "]", "for", "obj", "in", "out_grads", ":", "if", "not", "isinstance", "(", "obj", ",", "NDArray", ")", ":", "raise", "TypeError", "(", "\"inputs must be NDArray\"", ")", "ndarray", "=", "c_handle_array", "(", "out_grads", ")", "check_call", "(", "_LIB", ".", "MXExecutorBackwardEx", "(", "self", ".", "handle", ",", "mx_uint", "(", "len", "(", "out_grads", ")", ")", ",", "ndarray", ",", "ctypes", ".", "c_int", "(", "is_train", ")", ")", ")" ]
Do backward pass to get the gradient of arguments.

        Parameters
        ----------
        out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
            Gradient on the outputs to be propagated back.
            This parameter is only needed when bind is called
            on outputs that are not a loss function.

        is_train : bool, default True
            Whether this backward is for training or inference. Note that in rare
            cases you want to call backward with is_train=False to get gradient
            during inference.

        Examples
        --------
        >>> # Example for binding on loss function symbol, which gives the loss value of the model.
        >>> # Equivalently it gives the head gradient for backward pass.
        >>> # In this example the built-in SoftmaxOutput is used as loss function.
        >>> # MakeLoss can be used to define customized loss function symbol.
        >>> net = mx.sym.Variable('data')
        >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
        >>> net = mx.sym.Activation(net, name='relu', act_type="relu")
        >>> net = mx.sym.SoftmaxOutput(net, name='softmax')
        >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
        >>>         'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
        >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
        >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
        >>> out = texec.forward(is_train=True)[0].copy()
        >>> print(out.asnumpy())
        [[ 0.00378404  0.07600445  0.07600445  0.07600445  0.20660152  0.5616011 ]]
        >>> texec.backward()
        >>> print(texec.grad_arrays[1].asnumpy())
        [[ 0.00378404  0.00378404  0.00378404  0.00378404]
         [-0.92399555 -0.92399555 -0.92399555 -0.92399555]
         [ 0.07600445  0.07600445  0.07600445  0.07600445]
         [ 0.07600445  0.07600445  0.07600445  0.07600445]
         [ 0.20660152  0.20660152  0.20660152  0.20660152]
         [ 0.5616011   0.5616011   0.5616011   0.5616011 ]]
        >>>
        >>> # Example for binding on non-loss function symbol.
        >>> # Here the binding symbol is neither built-in loss function
        >>> # nor customized loss created by MakeLoss.
        >>> # As a result the head gradient is not automatically provided.
        >>> a = mx.sym.Variable('a')
        >>> b = mx.sym.Variable('b')
        >>> # c is not a loss function symbol
        >>> c = 2 * a + b
        >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
        >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
        >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
        >>> out = texec.forward(is_train=True)[0].copy()
        >>> print(out.asnumpy())
        [ 4.  7.]
        >>> # out_grads is the head gradient in backward pass.
        >>> # Here we define 'c' as loss function.
        >>> # Then 'out' is passed as head gradient of backward pass.
        >>> texec.backward(out)
        >>> print(texec.grad_arrays[0].asnumpy())
        [ 8.  14.]
        >>> print(texec.grad_arrays[1].asnumpy())
        [ 4.  7.]
[ "Do", "backward", "pass", "to", "get", "the", "gradient", "of", "arguments", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L155-L235
24,166
apache/incubator-mxnet
python/mxnet/executor.py
Executor.set_monitor_callback
def set_monitor_callback(self, callback, monitor_all=False): """Install callback for monitor. Parameters ---------- callback : function Takes a string and an NDArrayHandle. monitor_all : bool, default False If true, monitor both input and output, otherwise monitor output only. Examples -------- >>> def mon_callback(*args, **kwargs): >>> print("Do your stuff here.") >>> >>> texe.set_monitor_callback(mon_callback) """ cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p) self._monitor_callback = cb_type(_monitor_callback_wrapper(callback)) check_call(_LIB.MXExecutorSetMonitorCallbackEX( self.handle, self._monitor_callback, None, ctypes.c_int(monitor_all)))
python
def set_monitor_callback(self, callback, monitor_all=False): """Install callback for monitor. Parameters ---------- callback : function Takes a string and an NDArrayHandle. monitor_all : bool, default False If true, monitor both input and output, otherwise monitor output only. Examples -------- >>> def mon_callback(*args, **kwargs): >>> print("Do your stuff here.") >>> >>> texe.set_monitor_callback(mon_callback) """ cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p) self._monitor_callback = cb_type(_monitor_callback_wrapper(callback)) check_call(_LIB.MXExecutorSetMonitorCallbackEX( self.handle, self._monitor_callback, None, ctypes.c_int(monitor_all)))
[ "def", "set_monitor_callback", "(", "self", ",", "callback", ",", "monitor_all", "=", "False", ")", ":", "cb_type", "=", "ctypes", ".", "CFUNCTYPE", "(", "None", ",", "ctypes", ".", "c_char_p", ",", "NDArrayHandle", ",", "ctypes", ".", "c_void_p", ")", "self", ".", "_monitor_callback", "=", "cb_type", "(", "_monitor_callback_wrapper", "(", "callback", ")", ")", "check_call", "(", "_LIB", ".", "MXExecutorSetMonitorCallbackEX", "(", "self", ".", "handle", ",", "self", ".", "_monitor_callback", ",", "None", ",", "ctypes", ".", "c_int", "(", "monitor_all", ")", ")", ")" ]
Install callback for monitor. Parameters ---------- callback : function Takes a string and an NDArrayHandle. monitor_all : bool, default False If true, monitor both input and output, otherwise monitor output only. Examples -------- >>> def mon_callback(*args, **kwargs): >>> print("Do your stuff here.") >>> >>> texe.set_monitor_callback(mon_callback)
[ "Install", "callback", "for", "monitor", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L237-L260
24,167
apache/incubator-mxnet
python/mxnet/executor.py
Executor.arg_dict
def arg_dict(self):
    """Get dictionary representation of argument arrays.

    Returns
    -------
    arg_dict : dict of str to NDArray
        The dictionary that maps the names of arguments to NDArrays.

    Raises
    ------
    ValueError : if there are duplicated names in the arguments.
    """
    if self._arg_dict is None:
        self._arg_dict = Executor._get_dict(
            self._symbol.list_arguments(), self.arg_arrays)
    return self._arg_dict
python
def arg_dict(self):
    """Get dictionary representation of argument arrays.

    Returns
    -------
    arg_dict : dict of str to NDArray
        The dictionary that maps the names of arguments to NDArrays.

    Raises
    ------
    ValueError : if there are duplicated names in the arguments.
    """
    if self._arg_dict is None:
        self._arg_dict = Executor._get_dict(
            self._symbol.list_arguments(), self.arg_arrays)
    return self._arg_dict
[ "def", "arg_dict", "(", "self", ")", ":", "if", "self", ".", "_arg_dict", "is", "None", ":", "self", ".", "_arg_dict", "=", "Executor", ".", "_get_dict", "(", "self", ".", "_symbol", ".", "list_arguments", "(", ")", ",", "self", ".", "arg_arrays", ")", "return", "self", ".", "_arg_dict" ]
Get dictionary representation of argument arrays.

        Returns
        -------
        arg_dict : dict of str to NDArray
            The dictionary that maps the names of arguments to NDArrays.

        Raises
        ------
        ValueError : if there are duplicated names in the arguments.
[ "Get", "dictionary", "representation", "of", "argument", "arrrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L263-L278
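The usual pattern is to copy inputs in place through this dict before a forward pass (the 'data' name is a placeholder for whatever the symbol's input is called):

texec.arg_dict['data'][:] = my_batch   # hypothetical input array or NDArray
texec.forward(is_train=False)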
24,168
apache/incubator-mxnet
python/mxnet/executor.py
Executor.grad_dict
def grad_dict(self): """Get dictionary representation of gradient arrays. Returns ------- grad_dict : dict of str to NDArray The dictionary that maps name of arguments to gradient arrays. """ if self._grad_dict is None: self._grad_dict = Executor._get_dict( self._symbol.list_arguments(), self.grad_arrays) return self._grad_dict
python
def grad_dict(self): """Get dictionary representation of gradient arrays. Returns ------- grad_dict : dict of str to NDArray The dictionary that maps name of arguments to gradient arrays. """ if self._grad_dict is None: self._grad_dict = Executor._get_dict( self._symbol.list_arguments(), self.grad_arrays) return self._grad_dict
[ "def", "grad_dict", "(", "self", ")", ":", "if", "self", ".", "_grad_dict", "is", "None", ":", "self", ".", "_grad_dict", "=", "Executor", ".", "_get_dict", "(", "self", ".", "_symbol", ".", "list_arguments", "(", ")", ",", "self", ".", "grad_arrays", ")", "return", "self", ".", "_grad_dict" ]
Get dictionary representation of gradient arrays. Returns ------- grad_dict : dict of str to NDArray The dictionary that maps name of arguments to gradient arrays.
[ "Get", "dictionary", "representation", "of", "gradient", "arrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L281-L292
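After a backward pass, the same arrays as `grad_arrays` become reachable by argument name; a hedged sketch (the argument name is hypothetical):

texec.forward(is_train=True)
texec.backward()                              # assumes a loss-style symbol was bound
g = texec.grad_dict['fc_weight'].asnumpy()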
24,169
apache/incubator-mxnet
python/mxnet/executor.py
Executor.aux_dict
def aux_dict(self): """Get dictionary representation of auxiliary states arrays. Returns ------- aux_dict : dict of str to NDArray The dictionary that maps name of auxiliary states to NDArrays. Raises ------ ValueError : if there are duplicated names in the auxiliary states. """ if self._aux_dict is None: self._aux_dict = Executor._get_dict( self._symbol.list_auxiliary_states(), self.aux_arrays) return self._aux_dict
python
def aux_dict(self): """Get dictionary representation of auxiliary states arrays. Returns ------- aux_dict : dict of str to NDArray The dictionary that maps name of auxiliary states to NDArrays. Raises ------ ValueError : if there are duplicated names in the auxiliary states. """ if self._aux_dict is None: self._aux_dict = Executor._get_dict( self._symbol.list_auxiliary_states(), self.aux_arrays) return self._aux_dict
[ "def", "aux_dict", "(", "self", ")", ":", "if", "self", ".", "_aux_dict", "is", "None", ":", "self", ".", "_aux_dict", "=", "Executor", ".", "_get_dict", "(", "self", ".", "_symbol", ".", "list_auxiliary_states", "(", ")", ",", "self", ".", "aux_arrays", ")", "return", "self", ".", "_aux_dict" ]
Get dictionary representation of auxiliary states arrays. Returns ------- aux_dict : dict of str to NDArray The dictionary that maps name of auxiliary states to NDArrays. Raises ------ ValueError : if there are duplicated names in the auxiliary states.
[ "Get", "dictionary", "representation", "of", "auxiliary", "states", "arrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L295-L310
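Auxiliary states exist only for operators that carry them, such as BatchNorm's moving statistics; a minimal sketch (the 'bn_moving_*' names follow MXNet's naming convention but are assumptions here):

net = mx.sym.BatchNorm(mx.sym.Variable('data'), name='bn')
texec = net.simple_bind(mx.cpu(), data=(2, 3))
print(list(texec.aux_dict))   # expected: ['bn_moving_mean', 'bn_moving_var']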
24,170
apache/incubator-mxnet
python/mxnet/executor.py
Executor.output_dict
def output_dict(self): """Get dictionary representation of output arrays. Returns ------- output_dict : dict of str to NDArray The dictionary that maps name of output names to NDArrays. Raises ------ ValueError : if there are duplicated names in the outputs. """ if self._output_dict is None: self._output_dict = Executor._get_dict( self._symbol.list_outputs(), self.outputs) return self._output_dict
python
def output_dict(self): """Get dictionary representation of output arrays. Returns ------- output_dict : dict of str to NDArray The dictionary that maps name of output names to NDArrays. Raises ------ ValueError : if there are duplicated names in the outputs. """ if self._output_dict is None: self._output_dict = Executor._get_dict( self._symbol.list_outputs(), self.outputs) return self._output_dict
[ "def", "output_dict", "(", "self", ")", ":", "if", "self", ".", "_output_dict", "is", "None", ":", "self", ".", "_output_dict", "=", "Executor", ".", "_get_dict", "(", "self", ".", "_symbol", ".", "list_outputs", "(", ")", ",", "self", ".", "outputs", ")", "return", "self", ".", "_output_dict" ]
Get dictionary representation of output arrays. Returns ------- output_dict : dict of str to NDArray The dictionary that maps name of output names to NDArrays. Raises ------ ValueError : if there are duplicated names in the outputs.
[ "Get", "dictionary", "representation", "of", "output", "arrays", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L313-L328
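Output names follow the `<node name>_output` convention, so results can be fetched by name after a forward pass; continuing the BatchNorm sketch above:

texec.forward()
out = texec.output_dict['bn_output']   # same NDArray as texec.outputs[0]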
24,171
apache/incubator-mxnet
python/mxnet/executor.py
Executor.copy_params_from
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
    """Copy parameters from arg_params, aux_params into executor's internal array.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Parameters, dict of name to NDArray of arguments.

    aux_params : dict of str to NDArray, optional
        Parameters, dict of name to NDArray of auxiliary states.

    allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that are not needed by the executor.

    Raises
    ------
    ValueError
        If there are additional parameters in the dict but ``allow_extra_params=False``.

    Examples
    --------
    >>> # set parameters with existing model checkpoint
    >>> model_prefix = 'mx_mlp'
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
    >>> texec.copy_params_from(arg_params, aux_params)
    """
    for name, array in arg_params.items():
        if name in self.arg_dict:
            dst = self.arg_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name \"%s\" that is not in the arguments' % name)

    if aux_params is None:
        return

    for name, array in aux_params.items():
        if name in self.aux_dict:
            dst = self.aux_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name %s that is not in the auxiliary states' % name)
python
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
    """Copy parameters from arg_params, aux_params into executor's internal array.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Parameters, dict of name to NDArray of arguments.

    aux_params : dict of str to NDArray, optional
        Parameters, dict of name to NDArray of auxiliary states.

    allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that are not needed by the executor.

    Raises
    ------
    ValueError
        If there are additional parameters in the dict but ``allow_extra_params=False``.

    Examples
    --------
    >>> # set parameters with existing model checkpoint
    >>> model_prefix = 'mx_mlp'
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
    >>> texec.copy_params_from(arg_params, aux_params)
    """
    for name, array in arg_params.items():
        if name in self.arg_dict:
            dst = self.arg_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name \"%s\" that is not in the arguments' % name)

    if aux_params is None:
        return

    for name, array in aux_params.items():
        if name in self.aux_dict:
            dst = self.aux_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name %s that is not in the auxiliary states' % name)
[ "def", "copy_params_from", "(", "self", ",", "arg_params", ",", "aux_params", "=", "None", ",", "allow_extra_params", "=", "False", ")", ":", "for", "name", ",", "array", "in", "arg_params", ".", "items", "(", ")", ":", "if", "name", "in", "self", ".", "arg_dict", ":", "dst", "=", "self", ".", "arg_dict", "[", "name", "]", "array", ".", "astype", "(", "dst", ".", "dtype", ")", ".", "copyto", "(", "dst", ")", "elif", "not", "allow_extra_params", ":", "raise", "ValueError", "(", "'Find name \\\"%s\\\" that is not in the arguments'", "%", "name", ")", "if", "aux_params", "is", "None", ":", "return", "for", "name", ",", "array", "in", "aux_params", ".", "items", "(", ")", ":", "if", "name", "in", "self", ".", "aux_dict", ":", "dst", "=", "self", ".", "aux_dict", "[", "name", "]", "array", ".", "astype", "(", "dst", ".", "dtype", ")", ".", "copyto", "(", "dst", ")", "elif", "not", "allow_extra_params", ":", "raise", "ValueError", "(", "'Find name %s that is not in the auxiliary states'", "%", "name", ")" ]
Copy parameters from arg_params, aux_params into executor's internal array.

        Parameters
        ----------
        arg_params : dict of str to NDArray
            Parameters, dict of name to NDArray of arguments.

        aux_params : dict of str to NDArray, optional
            Parameters, dict of name to NDArray of auxiliary states.

        allow_extra_params : boolean, optional
            Whether to allow extra parameters that are not needed by the symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that are not needed by the executor.

        Raises
        ------
        ValueError
            If there are additional parameters in the dict but ``allow_extra_params=False``.

        Examples
        --------
        >>> # set parameters with existing model checkpoint
        >>> model_prefix = 'mx_mlp'
        >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
        >>> texec.copy_params_from(arg_params, aux_params)
[ "Copy", "parameters", "from", "arg_params", "aux_params", "into", "executor", "s", "internal", "array", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L330-L373
24,172
apache/incubator-mxnet
python/mxnet/executor.py
Executor.debug_str
def debug_str(self): """Get a debug string about internal execution plan. Returns ------- debug_str : string Debug string of the executor. Examples -------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.sin(a) >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}) >>> print(texec.debug_str()) Symbol Outputs: output[0]=_plus0(0) Variable:a -------------------- Op:_mul_scalar, Name=_mulscalar0 Inputs: arg[0]=a(0) version=0 Attrs: scalar=2 -------------------- Op:sin, Name=sin0 Inputs: arg[0]=a(0) version=0 -------------------- Op:elemwise_add, Name=_plus0 Inputs: arg[0]=_mulscalar0(0) arg[1]=sin0(0) Total 0 MB allocated Total 11 TempSpace resource requested """ debug_str = ctypes.c_char_p() check_call(_LIB.MXExecutorPrint( self.handle, ctypes.byref(debug_str))) return py_str(debug_str.value)
python
def debug_str(self): """Get a debug string about internal execution plan. Returns ------- debug_str : string Debug string of the executor. Examples -------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.sin(a) >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}) >>> print(texec.debug_str()) Symbol Outputs: output[0]=_plus0(0) Variable:a -------------------- Op:_mul_scalar, Name=_mulscalar0 Inputs: arg[0]=a(0) version=0 Attrs: scalar=2 -------------------- Op:sin, Name=sin0 Inputs: arg[0]=a(0) version=0 -------------------- Op:elemwise_add, Name=_plus0 Inputs: arg[0]=_mulscalar0(0) arg[1]=sin0(0) Total 0 MB allocated Total 11 TempSpace resource requested """ debug_str = ctypes.c_char_p() check_call(_LIB.MXExecutorPrint( self.handle, ctypes.byref(debug_str))) return py_str(debug_str.value)
[ "def", "debug_str", "(", "self", ")", ":", "debug_str", "=", "ctypes", ".", "c_char_p", "(", ")", "check_call", "(", "_LIB", ".", "MXExecutorPrint", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "debug_str", ")", ")", ")", "return", "py_str", "(", "debug_str", ".", "value", ")" ]
Get a debug string about internal execution plan. Returns ------- debug_str : string Debug string of the executor. Examples -------- >>> a = mx.sym.Variable('a') >>> b = mx.sym.sin(a) >>> c = 2 * a + b >>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}) >>> print(texec.debug_str()) Symbol Outputs: output[0]=_plus0(0) Variable:a -------------------- Op:_mul_scalar, Name=_mulscalar0 Inputs: arg[0]=a(0) version=0 Attrs: scalar=2 -------------------- Op:sin, Name=sin0 Inputs: arg[0]=a(0) version=0 -------------------- Op:elemwise_add, Name=_plus0 Inputs: arg[0]=_mulscalar0(0) arg[1]=sin0(0) Total 0 MB allocated Total 11 TempSpace resource requested
[ "Get", "a", "debug", "string", "about", "internal", "execution", "plan", "." ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor.py#L474-L513
24,173
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
MXNetGraph.convert_layer
def convert_layer(node, **kwargs): """Convert MXNet layer to ONNX""" op = str(node["op"]) if op not in MXNetGraph.registry_: raise AttributeError("No conversion function registered for op type %s yet." % op) convert_func = MXNetGraph.registry_[op] return convert_func(node, **kwargs)
python
def convert_layer(node, **kwargs): """Convert MXNet layer to ONNX""" op = str(node["op"]) if op not in MXNetGraph.registry_: raise AttributeError("No conversion function registered for op type %s yet." % op) convert_func = MXNetGraph.registry_[op] return convert_func(node, **kwargs)
[ "def", "convert_layer", "(", "node", ",", "*", "*", "kwargs", ")", ":", "op", "=", "str", "(", "node", "[", "\"op\"", "]", ")", "if", "op", "not", "in", "MXNetGraph", ".", "registry_", ":", "raise", "AttributeError", "(", "\"No conversion function registered for op type %s yet.\"", "%", "op", ")", "convert_func", "=", "MXNetGraph", ".", "registry_", "[", "op", "]", "return", "convert_func", "(", "node", ",", "*", "*", "kwargs", ")" ]
Convert MXNet layer to ONNX
[ "Convert", "MXNet", "layer", "to", "ONNX" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L86-L92
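A quick hedged sketch of the dispatch above; the node dict and the direct static call are illustrative assumptions (converters are normally invoked internally during export, and the op name below is made up).

# Hypothetical usage sketch for MXNetGraph.convert_layer
from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph

node = {"op": "made_up_op", "name": "node0", "inputs": []}  # hypothetical node dict
try:
    MXNetGraph.convert_layer(node)
except AttributeError as err:
    print(err)  # "No conversion function registered for op type made_up_op yet."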
24,174
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
MXNetGraph.split_params
def split_params(sym, params): """Helper function to split params dictionary into args and aux params Parameters ---------- sym : :class:`~mxnet.symbol.Symbol` MXNet symbol object params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format Returns ------- arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format """ arg_params = {} aux_params = {} for args in sym.list_arguments(): if args in params: arg_params.update({args: nd.array(params[args])}) for aux in sym.list_auxiliary_states(): if aux in params: aux_params.update({aux: nd.array(params[aux])}) return arg_params, aux_params
python
def split_params(sym, params): """Helper function to split params dictionary into args and aux params Parameters ---------- sym : :class:`~mxnet.symbol.Symbol` MXNet symbol object params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format Returns ------- arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format """ arg_params = {} aux_params = {} for args in sym.list_arguments(): if args in params: arg_params.update({args: nd.array(params[args])}) for aux in sym.list_auxiliary_states(): if aux in params: aux_params.update({aux: nd.array(params[aux])}) return arg_params, aux_params
[ "def", "split_params", "(", "sym", ",", "params", ")", ":", "arg_params", "=", "{", "}", "aux_params", "=", "{", "}", "for", "args", "in", "sym", ".", "list_arguments", "(", ")", ":", "if", "args", "in", "params", ":", "arg_params", ".", "update", "(", "{", "args", ":", "nd", ".", "array", "(", "params", "[", "args", "]", ")", "}", ")", "for", "aux", "in", "sym", ".", "list_auxiliary_states", "(", ")", ":", "if", "aux", "in", "params", ":", "aux_params", ".", "update", "(", "{", "aux", ":", "nd", ".", "array", "(", "params", "[", "aux", "]", ")", "}", ")", "return", "arg_params", ",", "aux_params" ]
Helper function to split params dictionary into args and aux params Parameters ---------- sym : :class:`~mxnet.symbol.Symbol` MXNet symbol object params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format Returns ------- arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
[ "Helper", "function", "to", "split", "params", "dictionary", "into", "args", "and", "aux", "params" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L95-L120
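A minimal sketch of the split, assuming split_params can be called as a static helper on a tiny hand-built symbol (the layer names are illustrative):

import mxnet as mx
from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph

data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=2, name='fc')
params = {'fc_weight': mx.nd.zeros((2, 4)), 'fc_bias': mx.nd.zeros(2)}
arg_params, aux_params = MXNetGraph.split_params(net, params)
print(sorted(arg_params), aux_params)  # ['fc_bias', 'fc_weight'] {} -- no aux states here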
24,175
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
MXNetGraph.get_outputs
def get_outputs(sym, params, in_shape, in_label): """ Infer output shapes and return dictionary of output name to shape :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on :param dict of (str, nd.NDArray) params: :param list of tuple(int, ...) in_shape: list of all input shapes :param in_label: name of label typically used in loss that may be left in graph. This name is removed from list of inputs required by symbol :return: dictionary of output name to shape :rtype: dict of (str, tuple(int, ...)) """ # remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided # by user. Also remove in_label, which is the name of the label symbol that may have been used # as the label for loss during training. inputs = {n: tuple(s) for n, s in zip([n for n in sym.list_inputs() if n not in params and n != in_label], in_shape)} # Add params and their shape to list of inputs inputs.update({n: v.shape for n, v in params.items() if n in sym.list_inputs()}) # Provide input data as well as input params to infer_shape() _, out_shapes, _ = sym.infer_shape(**inputs) out_names = list() for name in sym.list_outputs(): if name.endswith('_output'): out_names.append(name[:-len('_output')]) else: logging.info("output '%s' does not end with '_output'", name) out_names.append(name) assert len(out_shapes) == len(out_names) # bind output shapes with output names graph_outputs = {n: s for n, s in zip(out_names, out_shapes)} return graph_outputs
python
def get_outputs(sym, params, in_shape, in_label): """ Infer output shapes and return dictionary of output name to shape :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on :param dict of (str, nd.NDArray) params: :param list of tuple(int, ...) in_shape: list of all input shapes :param in_label: name of label typically used in loss that may be left in graph. This name is removed from list of inputs required by symbol :return: dictionary of output name to shape :rtype: dict of (str, tuple(int, ...)) """ # remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided # by user. Also remove in_label, which is the name of the label symbol that may have been used # as the label for loss during training. inputs = {n: tuple(s) for n, s in zip([n for n in sym.list_inputs() if n not in params and n != in_label], in_shape)} # Add params and their shape to list of inputs inputs.update({n: v.shape for n, v in params.items() if n in sym.list_inputs()}) # Provide input data as well as input params to infer_shape() _, out_shapes, _ = sym.infer_shape(**inputs) out_names = list() for name in sym.list_outputs(): if name.endswith('_output'): out_names.append(name[:-len('_output')]) else: logging.info("output '%s' does not end with '_output'", name) out_names.append(name) assert len(out_shapes) == len(out_names) # bind output shapes with output names graph_outputs = {n: s for n, s in zip(out_names, out_shapes)} return graph_outputs
[ "def", "get_outputs", "(", "sym", ",", "params", ",", "in_shape", ",", "in_label", ")", ":", "# remove any input listed in params from sym.list_inputs() and bind them to the input shapes provided", "# by user. Also remove in_label, which is the name of the label symbol that may have been used", "# as the label for loss during training.", "inputs", "=", "{", "n", ":", "tuple", "(", "s", ")", "for", "n", ",", "s", "in", "zip", "(", "[", "n", "for", "n", "in", "sym", ".", "list_inputs", "(", ")", "if", "n", "not", "in", "params", "and", "n", "!=", "in_label", "]", ",", "in_shape", ")", "}", "# Add params and their shape to list of inputs", "inputs", ".", "update", "(", "{", "n", ":", "v", ".", "shape", "for", "n", ",", "v", "in", "params", ".", "items", "(", ")", "if", "n", "in", "sym", ".", "list_inputs", "(", ")", "}", ")", "# Provide input data as well as input params to infer_shape()", "_", ",", "out_shapes", ",", "_", "=", "sym", ".", "infer_shape", "(", "*", "*", "inputs", ")", "out_names", "=", "list", "(", ")", "for", "name", "in", "sym", ".", "list_outputs", "(", ")", ":", "if", "name", ".", "endswith", "(", "'_output'", ")", ":", "out_names", ".", "append", "(", "name", "[", ":", "-", "len", "(", "'_output'", ")", "]", ")", "else", ":", "logging", ".", "info", "(", "\"output '%s' does not end with '_output'\"", ",", "name", ")", "out_names", ".", "append", "(", "name", ")", "assert", "len", "(", "out_shapes", ")", "==", "len", "(", "out_names", ")", "# bind output shapes with output names", "graph_outputs", "=", "{", "n", ":", "s", "for", "n", ",", "s", "in", "zip", "(", "out_names", ",", "out_shapes", ")", "}", "return", "graph_outputs" ]
Infer output shapes and return dictionary of output name to shape :param :class:`~mxnet.symbol.Symbol` sym: symbol to perform infer shape on :param dict of (str, nd.NDArray) params: :param list of tuple(int, ...) in_shape: list of all input shapes :param in_label: name of label typically used in loss that may be left in graph. This name is removed from list of inputs required by symbol :return: dictionary of output name to shape :rtype: dict of (str, tuple(int, ...))
[ "Infer", "output", "shapes", "and", "return", "dictionary", "of", "output", "name", "to", "shape" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L123-L156
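Since the helper is essentially sym.infer_shape plus trimming of the '_output' suffix, the behavior can be sketched with public symbol APIs alone (shapes below are assumptions):

import mxnet as mx

data = mx.sym.Variable('data')
fc = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
_, out_shapes, _ = fc.infer_shape(data=(1, 4), fc_weight=(10, 4), fc_bias=(10,))
# get_outputs would report {'fc': (1, 10)} after trimming the 'fc_output' name
print(dict(zip(['fc'], [tuple(s) for s in out_shapes])))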
24,176
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
MXNetGraph.convert_weights_to_numpy
def convert_weights_to_numpy(weights_dict): """Convert weights to numpy""" return dict([(k.replace("arg:", "").replace("aux:", ""), v.asnumpy()) for k, v in weights_dict.items()])
python
def convert_weights_to_numpy(weights_dict): """Convert weights to numpy""" return dict([(k.replace("arg:", "").replace("aux:", ""), v.asnumpy()) for k, v in weights_dict.items()])
[ "def", "convert_weights_to_numpy", "(", "weights_dict", ")", ":", "return", "dict", "(", "[", "(", "k", ".", "replace", "(", "\"arg:\"", ",", "\"\"", ")", ".", "replace", "(", "\"aux:\"", ",", "\"\"", ")", ",", "v", ".", "asnumpy", "(", ")", ")", "for", "k", ",", "v", "in", "weights_dict", ".", "items", "(", ")", "]", ")" ]
Convert weights to numpy
[ "Convert", "weights", "to", "numpy" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py#L159-L162
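A small sketch of the prefix stripping; the parameter names below are hypothetical:

import mxnet as mx
from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph

weights = {'arg:fc_weight': mx.nd.ones((2, 2)), 'aux:bn_moving_mean': mx.nd.zeros(2)}
np_weights = MXNetGraph.convert_weights_to_numpy(weights)
print(sorted(np_weights))  # ['bn_moving_mean', 'fc_weight'] -- prefixes stripped, values now numpy arrays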
24,177
apache/incubator-mxnet
example/ssd/train/train_net.py
get_lr_scheduler
def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio, num_example, batch_size, begin_epoch): """ Compute learning rate and refactor scheduler Parameters: --------- learning_rate : float original learning rate lr_refactor_step : comma separated str epochs to change learning rate lr_refactor_ratio : float lr *= ratio at certain steps num_example : int number of training images, used to estimate the iterations given epochs batch_size : int training batch size begin_epoch : int starting epoch Returns: --------- (learning_rate, mx.lr_scheduler) as tuple """ assert lr_refactor_ratio > 0 iter_refactor = [int(r) for r in lr_refactor_step.split(',') if r.strip()] if lr_refactor_ratio >= 1: return (learning_rate, None) else: lr = learning_rate epoch_size = num_example // batch_size for s in iter_refactor: if begin_epoch >= s: lr *= lr_refactor_ratio if lr != learning_rate: logging.getLogger().info("Adjusted learning rate to {} for epoch {}".format(lr, begin_epoch)) steps = [epoch_size * (x - begin_epoch) for x in iter_refactor if x > begin_epoch] if not steps: return (lr, None) lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio) return (lr, lr_scheduler)
python
def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio, num_example, batch_size, begin_epoch): """ Compute learning rate and refactor scheduler Parameters: --------- learning_rate : float original learning rate lr_refactor_step : comma separated str epochs to change learning rate lr_refactor_ratio : float lr *= ratio at certain steps num_example : int number of training images, used to estimate the iterations given epochs batch_size : int training batch size begin_epoch : int starting epoch Returns: --------- (learning_rate, mx.lr_scheduler) as tuple """ assert lr_refactor_ratio > 0 iter_refactor = [int(r) for r in lr_refactor_step.split(',') if r.strip()] if lr_refactor_ratio >= 1: return (learning_rate, None) else: lr = learning_rate epoch_size = num_example // batch_size for s in iter_refactor: if begin_epoch >= s: lr *= lr_refactor_ratio if lr != learning_rate: logging.getLogger().info("Adjusted learning rate to {} for epoch {}".format(lr, begin_epoch)) steps = [epoch_size * (x - begin_epoch) for x in iter_refactor if x > begin_epoch] if not steps: return (lr, None) lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=lr_refactor_ratio) return (lr, lr_scheduler)
[ "def", "get_lr_scheduler", "(", "learning_rate", ",", "lr_refactor_step", ",", "lr_refactor_ratio", ",", "num_example", ",", "batch_size", ",", "begin_epoch", ")", ":", "assert", "lr_refactor_ratio", ">", "0", "iter_refactor", "=", "[", "int", "(", "r", ")", "for", "r", "in", "lr_refactor_step", ".", "split", "(", "','", ")", "if", "r", ".", "strip", "(", ")", "]", "if", "lr_refactor_ratio", ">=", "1", ":", "return", "(", "learning_rate", ",", "None", ")", "else", ":", "lr", "=", "learning_rate", "epoch_size", "=", "num_example", "//", "batch_size", "for", "s", "in", "iter_refactor", ":", "if", "begin_epoch", ">=", "s", ":", "lr", "*=", "lr_refactor_ratio", "if", "lr", "!=", "learning_rate", ":", "logging", ".", "getLogger", "(", ")", ".", "info", "(", "\"Adjusted learning rate to {} for epoch {}\"", ".", "format", "(", "lr", ",", "begin_epoch", ")", ")", "steps", "=", "[", "epoch_size", "*", "(", "x", "-", "begin_epoch", ")", "for", "x", "in", "iter_refactor", "if", "x", ">", "begin_epoch", "]", "if", "not", "steps", ":", "return", "(", "lr", ",", "None", ")", "lr_scheduler", "=", "mx", ".", "lr_scheduler", ".", "MultiFactorScheduler", "(", "step", "=", "steps", ",", "factor", "=", "lr_refactor_ratio", ")", "return", "(", "lr", ",", "lr_scheduler", ")" ]
Compute learning rate and refactor scheduler Parameters: --------- learning_rate : float original learning rate lr_refactor_step : comma separated str epochs to change learning rate lr_refactor_ratio : float lr *= ratio at certain steps num_example : int number of training images, used to estimate the iterations given epochs batch_size : int training batch size begin_epoch : int starting epoch Returns: --------- (learning_rate, mx.lr_scheduler) as tuple
[ "Compute", "learning", "rate", "and", "refactor", "scheduler" ]
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/train/train_net.py#L48-L88
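A worked example of the arithmetic above; all numbers are made up, and the import path is an assumption based on this file's location:

from train.train_net import get_lr_scheduler  # assumed import path within example/ssd

# 1000 images / batch 32 -> epoch_size = 31. Resuming at epoch 30 with steps "20,40"
# and ratio 0.1: step 20 has already passed, so lr = 0.001 * 0.1 = 0.0001, and the
# remaining boundary lands 31 * (40 - 30) = 310 iterations into the resumed run.
lr, scheduler = get_lr_scheduler(0.001, '20,40', 0.1,
                                 num_example=1000, batch_size=32, begin_epoch=30)
print(lr)  # 0.0001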
24,178
slundberg/shap
shap/datasets.py
imagenet50
def imagenet50(display=False, resolution=224): """ This is a set of 50 images representative of ImageNet images. This dataset was collected by randomly finding a working ImageNet link and then pasting the original ImageNet image into Google image search restricted to images licensed for reuse. A similar image (now with rights to reuse) was downloaded as a rough replacement for the original ImageNet image. The point is to have a random sample of ImageNet for use as a background distribution for explaining models trained on ImageNet data. Note that because the images are only rough replacements the labels might no longer be correct. """ prefix = github_data_url + "imagenet50_" X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32) y = np.loadtxt(cache(prefix + "labels.csv")) return X, y
python
def imagenet50(display=False, resolution=224): """ This is a set of 50 images representative of ImageNet images. This dataset was collected by randomly finding a working ImageNet link and then pasting the original ImageNet image into Google image search restricted to images licensed for reuse. A similar image (now with rights to reuse) was downloaded as a rough replacement for the original ImageNet image. The point is to have a random sample of ImageNet for use as a background distribution for explaining models trained on ImageNet data. Note that because the images are only rough replacements the labels might no longer be correct. """ prefix = github_data_url + "imagenet50_" X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32) y = np.loadtxt(cache(prefix + "labels.csv")) return X, y
[ "def", "imagenet50", "(", "display", "=", "False", ",", "resolution", "=", "224", ")", ":", "prefix", "=", "github_data_url", "+", "\"imagenet50_\"", "X", "=", "np", ".", "load", "(", "cache", "(", "prefix", "+", "\"%sx%s.npy\"", "%", "(", "resolution", ",", "resolution", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "y", "=", "np", ".", "loadtxt", "(", "cache", "(", "prefix", "+", "\"labels.csv\"", ")", ")", "return", "X", ",", "y" ]
This is a set of 50 images representative of ImageNet images. This dataset was collected by randomly finding a working ImageNet link and then pasting the original ImageNet image into Google image search restricted to images licensed for reuse. A similar image (now with rights to reuse) was downloaded as a rough replacement for the original ImageNet image. The point is to have a random sample of ImageNet for use as a background distribution for explaining models trained on ImageNet data. Note that because the images are only rough replacements the labels might no longer be correct.
[ "This", "is", "a", "set", "of", "50", "images", "representative", "of", "ImageNet", "images", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L13-L28
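Typical use as a background set for explainers; the shape is noted as an expectation, not a guarantee:

import shap

X, y = shap.datasets.imagenet50()  # default 224x224 resolution
print(X.shape)                     # expected (50, 224, 224, 3)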
24,179
slundberg/shap
shap/datasets.py
boston
def boston(display=False): """ Return the boston housing data in a nice package. """ d = sklearn.datasets.load_boston() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 return df, d.target
python
def boston(display=False): """ Return the boston housing data in a nice package. """ d = sklearn.datasets.load_boston() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 return df, d.target
[ "def", "boston", "(", "display", "=", "False", ")", ":", "d", "=", "sklearn", ".", "datasets", ".", "load_boston", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "d", ".", "data", ",", "columns", "=", "d", ".", "feature_names", ")", "# pylint: disable=E1101", "return", "df", ",", "d", ".", "target" ]
Return the boston housing data in a nice package.
[ "Return", "the", "boston", "housing", "data", "in", "a", "nice", "package", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L30-L35
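Minimal usage, with the well-known dimensions of the dataset noted for orientation:

import shap

X, y = shap.datasets.boston()
print(X.shape)  # (506, 13) -- the classic Boston housing feature matrix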
24,180
slundberg/shap
shap/datasets.py
imdb
def imdb(display=False): """ Return the classic IMDB sentiment analysis training data in a nice package. Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015 """ with open(cache(github_data_url + "imdb_train.txt")) as f: data = f.readlines() y = np.ones(25000, dtype=np.bool) y[:12500] = 0 return data, y
python
def imdb(display=False): """ Return the classic IMDB sentiment analysis training data in a nice package. Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015 """ with open(cache(github_data_url + "imdb_train.txt")) as f: data = f.readlines() y = np.ones(25000, dtype=np.bool) y[:12500] = 0 return data, y
[ "def", "imdb", "(", "display", "=", "False", ")", ":", "with", "open", "(", "cache", "(", "github_data_url", "+", "\"imdb_train.txt\"", ")", ")", "as", "f", ":", "data", "=", "f", ".", "readlines", "(", ")", "y", "=", "np", ".", "ones", "(", "25000", ",", "dtype", "=", "np", ".", "bool", ")", "y", "[", ":", "12500", "]", "=", "0", "return", "data", ",", "y" ]
Return the classic IMDB sentiment analysis training data in a nice package. Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
[ "Return", "the", "classic", "IMDB", "sentiment", "analysis", "training", "data", "in", "a", "nice", "package", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L37-L48
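A usage sketch; the label layout follows directly from the code above (first 12500 rows negative):

import shap

corpus, y = shap.datasets.imdb()
print(len(corpus), bool(y[:12500].any()))  # 25000 False -- first half labeled negative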
24,181
slundberg/shap
shap/datasets.py
communitiesandcrime
def communitiesandcrime(display=False): """ Predict total number of non-violent crimes per 100K population. This dataset is from the classic UCI Machine Learning repository: https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized """ raw_data = pd.read_csv( cache(github_data_url + "CommViolPredUnnormalizedData.txt"), na_values="?" ) # find the indices where the total violent crimes are known valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0] y = np.array(raw_data.iloc[valid_inds,-2], dtype=np.float) # extract the predictive features and remove columns with missing values X = raw_data.iloc[valid_inds,5:-18] valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0] X = X.iloc[:,valid_cols] return X, y
python
def communitiesandcrime(display=False): """ Predict total number of non-violent crimes per 100K population. This dataset is from the classic UCI Machine Learning repository: https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized """ raw_data = pd.read_csv( cache(github_data_url + "CommViolPredUnnormalizedData.txt"), na_values="?" ) # find the indices where the total violent crimes are known valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0] y = np.array(raw_data.iloc[valid_inds,-2], dtype=np.float) # extract the predictive features and remove columns with missing values X = raw_data.iloc[valid_inds,5:-18] valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0] X = X.iloc[:,valid_cols] return X, y
[ "def", "communitiesandcrime", "(", "display", "=", "False", ")", ":", "raw_data", "=", "pd", ".", "read_csv", "(", "cache", "(", "github_data_url", "+", "\"CommViolPredUnnormalizedData.txt\"", ")", ",", "na_values", "=", "\"?\"", ")", "# find the indices where the total violent crimes are known", "valid_inds", "=", "np", ".", "where", "(", "np", ".", "invert", "(", "np", ".", "isnan", "(", "raw_data", ".", "iloc", "[", ":", ",", "-", "2", "]", ")", ")", ")", "[", "0", "]", "y", "=", "np", ".", "array", "(", "raw_data", ".", "iloc", "[", "valid_inds", ",", "-", "2", "]", ",", "dtype", "=", "np", ".", "float", ")", "# extract the predictive features and remove columns with missing values", "X", "=", "raw_data", ".", "iloc", "[", "valid_inds", ",", "5", ":", "-", "18", "]", "valid_cols", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "X", ".", "values", ")", ".", "sum", "(", "0", ")", "==", "0", ")", "[", "0", "]", "X", "=", "X", ".", "iloc", "[", ":", ",", "valid_cols", "]", "return", "X", ",", "y" ]
Predict total number of non-violent crimes per 100K population. This dataset is from the classic UCI Machine Learning repository: https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
[ "Predict", "total", "number", "of", "non", "-", "violent", "crimes", "per", "100K", "population", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L50-L71
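Minimal usage; the surviving column count depends on the missing-value filter, so only invariants are checked:

import shap

X, y = shap.datasets.communitiesandcrime()
print(X.shape[0] == len(y), X.isnull().values.any())  # True False -- rows align, no NaNs remain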
24,182
slundberg/shap
shap/datasets.py
diabetes
def diabetes(display=False): """ Return the diabetes data in a nice package. """ d = sklearn.datasets.load_diabetes() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 return df, d.target
python
def diabetes(display=False): """ Return the diabetes data in a nice package. """ d = sklearn.datasets.load_diabetes() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 return df, d.target
[ "def", "diabetes", "(", "display", "=", "False", ")", ":", "d", "=", "sklearn", ".", "datasets", ".", "load_diabetes", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "d", ".", "data", ",", "columns", "=", "d", ".", "feature_names", ")", "# pylint: disable=E1101", "return", "df", ",", "d", ".", "target" ]
Return the diabetes data in a nice package.
[ "Return", "the", "diabetes", "data", "in", "a", "nice", "package", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L73-L78
24,183
slundberg/shap
shap/datasets.py
iris
def iris(display=False): """ Return the classic iris data in a nice package. """ d = sklearn.datasets.load_iris() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 if display: return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101 else: return df, d.target
python
def iris(display=False): """ Return the classic iris data in a nice package. """ d = sklearn.datasets.load_iris() df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101 if display: return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101 else: return df, d.target
[ "def", "iris", "(", "display", "=", "False", ")", ":", "d", "=", "sklearn", ".", "datasets", ".", "load_iris", "(", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "d", ".", "data", ",", "columns", "=", "d", ".", "feature_names", ")", "# pylint: disable=E1101", "if", "display", ":", "return", "df", ",", "[", "d", ".", "target_names", "[", "v", "]", "for", "v", "in", "d", ".", "target", "]", "# pylint: disable=E1101", "else", ":", "return", "df", ",", "d", ".", "target" ]
Return the classic iris data in a nice package.
[ "Return", "the", "classic", "iris", "data", "in", "a", "nice", "package", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L81-L89
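The display flag changes only the label encoding, as a quick check shows:

import shap

X, y_codes = shap.datasets.iris()
_, y_names = shap.datasets.iris(display=True)
print(y_codes[0], y_names[0])  # 0 setosa -- integer codes vs. class names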
24,184
slundberg/shap
shap/datasets.py
adult
def adult(display=False): """ Return the Adult census data in a nice package. """ dtypes = [ ("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"), ("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"), ("Occupation", "category"), ("Relationship", "category"), ("Race", "category"), ("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"), ("Hours per week", "float32"), ("Country", "category"), ("Target", "category") ] raw_data = pd.read_csv( cache(github_data_url + "adult.data"), names=[d[0] for d in dtypes], na_values="?", dtype=dict(dtypes) ) data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes)) data["Target"] = data["Target"] == " >50K" rcode = { "Not-in-family": 0, "Unmarried": 1, "Other-relative": 2, "Own-child": 3, "Husband": 4, "Wife": 5 } for k, dtype in filt_dtypes: if dtype == "category": if k == "Relationship": data[k] = np.array([rcode[v.strip()] for v in data[k]]) else: data[k] = data[k].cat.codes if display: return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values else: return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
python
def adult(display=False): """ Return the Adult census data in a nice package. """ dtypes = [ ("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"), ("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"), ("Occupation", "category"), ("Relationship", "category"), ("Race", "category"), ("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"), ("Hours per week", "float32"), ("Country", "category"), ("Target", "category") ] raw_data = pd.read_csv( cache(github_data_url + "adult.data"), names=[d[0] for d in dtypes], na_values="?", dtype=dict(dtypes) ) data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes)) data["Target"] = data["Target"] == " >50K" rcode = { "Not-in-family": 0, "Unmarried": 1, "Other-relative": 2, "Own-child": 3, "Husband": 4, "Wife": 5 } for k, dtype in filt_dtypes: if dtype == "category": if k == "Relationship": data[k] = np.array([rcode[v.strip()] for v in data[k]]) else: data[k] = data[k].cat.codes if display: return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values else: return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
[ "def", "adult", "(", "display", "=", "False", ")", ":", "dtypes", "=", "[", "(", "\"Age\"", ",", "\"float32\"", ")", ",", "(", "\"Workclass\"", ",", "\"category\"", ")", ",", "(", "\"fnlwgt\"", ",", "\"float32\"", ")", ",", "(", "\"Education\"", ",", "\"category\"", ")", ",", "(", "\"Education-Num\"", ",", "\"float32\"", ")", ",", "(", "\"Marital Status\"", ",", "\"category\"", ")", ",", "(", "\"Occupation\"", ",", "\"category\"", ")", ",", "(", "\"Relationship\"", ",", "\"category\"", ")", ",", "(", "\"Race\"", ",", "\"category\"", ")", ",", "(", "\"Sex\"", ",", "\"category\"", ")", ",", "(", "\"Capital Gain\"", ",", "\"float32\"", ")", ",", "(", "\"Capital Loss\"", ",", "\"float32\"", ")", ",", "(", "\"Hours per week\"", ",", "\"float32\"", ")", ",", "(", "\"Country\"", ",", "\"category\"", ")", ",", "(", "\"Target\"", ",", "\"category\"", ")", "]", "raw_data", "=", "pd", ".", "read_csv", "(", "cache", "(", "github_data_url", "+", "\"adult.data\"", ")", ",", "names", "=", "[", "d", "[", "0", "]", "for", "d", "in", "dtypes", "]", ",", "na_values", "=", "\"?\"", ",", "dtype", "=", "dict", "(", "dtypes", ")", ")", "data", "=", "raw_data", ".", "drop", "(", "[", "\"Education\"", "]", ",", "axis", "=", "1", ")", "# redundant with Education-Num", "filt_dtypes", "=", "list", "(", "filter", "(", "lambda", "x", ":", "not", "(", "x", "[", "0", "]", "in", "[", "\"Target\"", ",", "\"Education\"", "]", ")", ",", "dtypes", ")", ")", "data", "[", "\"Target\"", "]", "=", "data", "[", "\"Target\"", "]", "==", "\" >50K\"", "rcode", "=", "{", "\"Not-in-family\"", ":", "0", ",", "\"Unmarried\"", ":", "1", ",", "\"Other-relative\"", ":", "2", ",", "\"Own-child\"", ":", "3", ",", "\"Husband\"", ":", "4", ",", "\"Wife\"", ":", "5", "}", "for", "k", ",", "dtype", "in", "filt_dtypes", ":", "if", "dtype", "==", "\"category\"", ":", "if", "k", "==", "\"Relationship\"", ":", "data", "[", "k", "]", "=", "np", ".", "array", "(", "[", "rcode", "[", "v", ".", "strip", "(", ")", "]", "for", "v", "in", "data", "[", "k", "]", "]", ")", "else", ":", "data", "[", "k", "]", "=", "data", "[", "k", "]", ".", "cat", ".", "codes", "if", "display", ":", "return", "raw_data", ".", "drop", "(", "[", "\"Education\"", ",", "\"Target\"", ",", "\"fnlwgt\"", "]", ",", "axis", "=", "1", ")", ",", "data", "[", "\"Target\"", "]", ".", "values", "else", ":", "return", "data", ".", "drop", "(", "[", "\"Target\"", ",", "\"fnlwgt\"", "]", ",", "axis", "=", "1", ")", ",", "data", "[", "\"Target\"", "]", ".", "values" ]
Return the Adult census data in a nice package.
[ "Return", "the", "Adult", "census", "data", "in", "a", "nice", "package", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L92-L128
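A usage sketch contrasting the modeling and display forms:

import shap

X, y = shap.datasets.adult()                   # integer-coded categoricals, boolean target
X_disp, _ = shap.datasets.adult(display=True)  # human-readable categories for plotting
print(X.shape[1] == X_disp.shape[1])           # True -- same columns, different encoding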
24,185
slundberg/shap
shap/datasets.py
nhanesi
def nhanesi(display=False): """ A nicely packaged version of NHANES I data with survival times as labels. """ X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv")) y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"] if display: X_display = X.copy() X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]] return X_display, np.array(y) else: return X, np.array(y)
python
def nhanesi(display=False): """ A nicely packaged version of NHANES I data with survival times as labels. """ X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv")) y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"] if display: X_display = X.copy() X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]] return X_display, np.array(y) else: return X, np.array(y)
[ "def", "nhanesi", "(", "display", "=", "False", ")", ":", "X", "=", "pd", ".", "read_csv", "(", "cache", "(", "github_data_url", "+", "\"NHANESI_subset_X.csv\"", ")", ")", "y", "=", "pd", ".", "read_csv", "(", "cache", "(", "github_data_url", "+", "\"NHANESI_subset_y.csv\"", ")", ")", "[", "\"y\"", "]", "if", "display", ":", "X_display", "=", "X", ".", "copy", "(", ")", "X_display", "[", "\"Sex\"", "]", "=", "[", "\"Male\"", "if", "v", "==", "1", "else", "\"Female\"", "for", "v", "in", "X", "[", "\"Sex\"", "]", "]", "return", "X_display", ",", "np", ".", "array", "(", "y", ")", "else", ":", "return", "X", ",", "np", ".", "array", "(", "y", ")" ]
A nicely packaged version of NHANES I data with survival times as labels.
[ "A", "nicely", "packaged", "version", "of", "NHANES", "I", "data", "with", "survival", "times", "as", "labels", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L131-L141
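A usage sketch; display=True only remaps the Sex column to strings:

import shap

X, y = shap.datasets.nhanesi(display=True)
print(X["Sex"].iloc[0] in ("Male", "Female"))  # True -- numeric codes without display=True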
24,186
slundberg/shap
shap/datasets.py
cric
def cric(display=False): """ A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label. """ X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv")) y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv")) if display: X_display = X.copy() return X_display, y else: return X, y
python
def cric(display=False): """ A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label. """ X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv")) y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv")) if display: X_display = X.copy() return X_display, y else: return X, y
[ "def", "cric", "(", "display", "=", "False", ")", ":", "X", "=", "pd", ".", "read_csv", "(", "cache", "(", "github_data_url", "+", "\"CRIC_time_4yearESRD_X.csv\"", ")", ")", "y", "=", "np", ".", "loadtxt", "(", "cache", "(", "github_data_url", "+", "\"CRIC_time_4yearESRD_y.csv\"", ")", ")", "if", "display", ":", "X_display", "=", "X", ".", "copy", "(", ")", "return", "X_display", ",", "y", "else", ":", "return", "X", ",", "y" ]
A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
[ "A", "nicely", "packaged", "version", "of", "CRIC", "data", "with", "progression", "to", "ESRD", "within", "4", "years", "as", "the", "label", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L143-L152
24,187
slundberg/shap
shap/datasets.py
corrgroups60
def corrgroups60(display=False): """ Correlated Groups 60 A simulated dataset with tight correlations among distinct groups of features. """ # save the random state and set a constant seed old_seed = np.random.get_state() np.random.seed(0) # generate dataset with known correlation N = 1000 M = 60 # set one coefficient from each group of 3 to 1 beta = np.zeros(M) beta[0:30:3] = 1 # build a correlation matrix with groups of 3 tightly correlated features C = np.eye(M) for i in range(0,30,3): C[i,i+1] = C[i+1,i] = 0.99 C[i,i+2] = C[i+2,i] = 0.99 C[i+1,i+2] = C[i+2,i+1] = 0.99 f = lambda X: np.matmul(X, beta) # Make sure the sample correlation is a perfect match X_start = np.random.randn(N, M) X_centered = X_start - X_start.mean(0) Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0] W = np.linalg.cholesky(np.linalg.inv(Sigma)).T X_white = np.matmul(X_centered, W.T) assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data # create the final data X_final = np.matmul(X_white, np.linalg.cholesky(C).T) X = X_final y = f(X) + np.random.randn(N) * 1e-2 # restore the previous numpy random state np.random.set_state(old_seed) return pd.DataFrame(X), y
python
def corrgroups60(display=False): """ Correlated Groups 60 A simulated dataset with tight correlations among distinct groups of features. """ # save the random state and set a constant seed old_seed = np.random.get_state() np.random.seed(0) # generate dataset with known correlation N = 1000 M = 60 # set one coefficient from each group of 3 to 1 beta = np.zeros(M) beta[0:30:3] = 1 # build a correlation matrix with groups of 3 tightly correlated features C = np.eye(M) for i in range(0,30,3): C[i,i+1] = C[i+1,i] = 0.99 C[i,i+2] = C[i+2,i] = 0.99 C[i+1,i+2] = C[i+2,i+1] = 0.99 f = lambda X: np.matmul(X, beta) # Make sure the sample correlation is a perfect match X_start = np.random.randn(N, M) X_centered = X_start - X_start.mean(0) Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0] W = np.linalg.cholesky(np.linalg.inv(Sigma)).T X_white = np.matmul(X_centered, W.T) assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data # create the final data X_final = np.matmul(X_white, np.linalg.cholesky(C).T) X = X_final y = f(X) + np.random.randn(N) * 1e-2 # restore the previous numpy random state np.random.set_state(old_seed) return pd.DataFrame(X), y
[ "def", "corrgroups60", "(", "display", "=", "False", ")", ":", "# save the random state and set a constant seed", "old_seed", "=", "np", ".", "random", ".", "get_state", "(", ")", "np", ".", "random", ".", "seed", "(", "0", ")", "# generate dataset with known correlation", "N", "=", "1000", "M", "=", "60", "# set one coefficient from each group of 3 to 1", "beta", "=", "np", ".", "zeros", "(", "M", ")", "beta", "[", "0", ":", "30", ":", "3", "]", "=", "1", "# build a correlation matrix with groups of 3 tightly correlated features", "C", "=", "np", ".", "eye", "(", "M", ")", "for", "i", "in", "range", "(", "0", ",", "30", ",", "3", ")", ":", "C", "[", "i", ",", "i", "+", "1", "]", "=", "C", "[", "i", "+", "1", ",", "i", "]", "=", "0.99", "C", "[", "i", ",", "i", "+", "2", "]", "=", "C", "[", "i", "+", "2", ",", "i", "]", "=", "0.99", "C", "[", "i", "+", "1", ",", "i", "+", "2", "]", "=", "C", "[", "i", "+", "2", ",", "i", "+", "1", "]", "=", "0.99", "f", "=", "lambda", "X", ":", "np", ".", "matmul", "(", "X", ",", "beta", ")", "# Make sure the sample correlation is a perfect match", "X_start", "=", "np", ".", "random", ".", "randn", "(", "N", ",", "M", ")", "X_centered", "=", "X_start", "-", "X_start", ".", "mean", "(", "0", ")", "Sigma", "=", "np", ".", "matmul", "(", "X_centered", ".", "T", ",", "X_centered", ")", "/", "X_centered", ".", "shape", "[", "0", "]", "W", "=", "np", ".", "linalg", ".", "cholesky", "(", "np", ".", "linalg", ".", "inv", "(", "Sigma", ")", ")", ".", "T", "X_white", "=", "np", ".", "matmul", "(", "X_centered", ",", "W", ".", "T", ")", "assert", "np", ".", "linalg", ".", "norm", "(", "np", ".", "corrcoef", "(", "np", ".", "matmul", "(", "X_centered", ",", "W", ".", "T", ")", ".", "T", ")", "-", "np", ".", "eye", "(", "M", ")", ")", "<", "1e-6", "# ensure this decorrelates the data", "# create the final data", "X_final", "=", "np", ".", "matmul", "(", "X_white", ",", "np", ".", "linalg", ".", "cholesky", "(", "C", ")", ".", "T", ")", "X", "=", "X_final", "y", "=", "f", "(", "X", ")", "+", "np", ".", "random", ".", "randn", "(", "N", ")", "*", "1e-2", "# restore the previous numpy random state", "np", ".", "random", ".", "set_state", "(", "old_seed", ")", "return", "pd", ".", "DataFrame", "(", "X", ")", ",", "y" ]
Correlated Groups 60 A simulated dataset with tight correlations among distinct groups of features.
[ "Correlated", "Groups", "60", "A", "simulated", "dataset", "with", "tight", "correlations", "among", "distinct", "groups", "of", "features", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L155-L197
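A quick check that the intended group structure shows up in a draw; the correlation values follow from the construction above:

import numpy as np
import shap

X, y = shap.datasets.corrgroups60()
corr = np.corrcoef(X.values.T)
print(round(corr[0, 1], 2), round(corr[0, 3], 2))  # ~0.99 within a group, ~0.0 across groups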
24,188
slundberg/shap
shap/datasets.py
independentlinear60
def independentlinear60(display=False): """ A simulated dataset with independent features and a linear relationship to the label. """ # save the random state and set a constant seed old_seed = np.random.get_state() np.random.seed(0) # generate a dataset with independent features N = 1000 M = 60 # set one coefficient from each group of 3 to 1 beta = np.zeros(M) beta[0:30:3] = 1 f = lambda X: np.matmul(X, beta) # center the features X_start = np.random.randn(N, M) X = X_start - X_start.mean(0) y = f(X) + np.random.randn(N) * 1e-2 # restore the previous numpy random state np.random.set_state(old_seed) return pd.DataFrame(X), y
python
def independentlinear60(display=False): """ A simulated dataset with independent features and a linear relationship to the label. """ # save the random state and set a constant seed old_seed = np.random.get_state() np.random.seed(0) # generate a dataset with independent features N = 1000 M = 60 # set one coefficient from each group of 3 to 1 beta = np.zeros(M) beta[0:30:3] = 1 f = lambda X: np.matmul(X, beta) # center the features X_start = np.random.randn(N, M) X = X_start - X_start.mean(0) y = f(X) + np.random.randn(N) * 1e-2 # restore the previous numpy random state np.random.set_state(old_seed) return pd.DataFrame(X), y
[ "def", "independentlinear60", "(", "display", "=", "False", ")", ":", "# save the random state and set a constant seed", "old_seed", "=", "np", ".", "random", ".", "get_state", "(", ")", "np", ".", "random", ".", "seed", "(", "0", ")", "# generate a dataset with independent features", "N", "=", "1000", "M", "=", "60", "# set one coefficient from each group of 3 to 1", "beta", "=", "np", ".", "zeros", "(", "M", ")", "beta", "[", "0", ":", "30", ":", "3", "]", "=", "1", "f", "=", "lambda", "X", ":", "np", ".", "matmul", "(", "X", ",", "beta", ")", "# center the features", "X_start", "=", "np", ".", "random", ".", "randn", "(", "N", ",", "M", ")", "X", "=", "X_start", "-", "X_start", ".", "mean", "(", "0", ")", "y", "=", "f", "(", "X", ")", "+", "np", ".", "random", ".", "randn", "(", "N", ")", "*", "1e-2", "# restore the previous numpy random state", "np", ".", "random", ".", "set_state", "(", "old_seed", ")", "return", "pd", ".", "DataFrame", "(", "X", ")", ",", "y" ]
A simulated dataset with independent features and a linear relationship to the label.
[ "A", "simulated", "dataset", "with", "independent", "features", "and", "a", "linear", "relationship", "to", "the", "label", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L200-L225
24,189
slundberg/shap
shap/datasets.py
rank
def rank(): """ Ranking datasets from the lightgbm repository. """ rank_data_url = 'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/' x_train, y_train = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.train')) x_test, y_test = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.test')) q_train = np.loadtxt(cache(rank_data_url + 'rank.train.query')) q_test = np.loadtxt(cache(rank_data_url + 'rank.test.query')) return x_train, y_train, x_test, y_test, q_train, q_test
python
def rank(): """ Ranking datasets from the lightgbm repository. """ rank_data_url = 'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/' x_train, y_train = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.train')) x_test, y_test = sklearn.datasets.load_svmlight_file(cache(rank_data_url + 'rank.test')) q_train = np.loadtxt(cache(rank_data_url + 'rank.train.query')) q_test = np.loadtxt(cache(rank_data_url + 'rank.test.query')) return x_train, y_train, x_test, y_test, q_train, q_test
[ "def", "rank", "(", ")", ":", "rank_data_url", "=", "'https://raw.githubusercontent.com/Microsoft/LightGBM/master/examples/lambdarank/'", "x_train", ",", "y_train", "=", "sklearn", ".", "datasets", ".", "load_svmlight_file", "(", "cache", "(", "rank_data_url", "+", "'rank.train'", ")", ")", "x_test", ",", "y_test", "=", "sklearn", ".", "datasets", ".", "load_svmlight_file", "(", "cache", "(", "rank_data_url", "+", "'rank.test'", ")", ")", "q_train", "=", "np", ".", "loadtxt", "(", "cache", "(", "rank_data_url", "+", "'rank.train.query'", ")", ")", "q_test", "=", "np", ".", "loadtxt", "(", "cache", "(", "rank_data_url", "+", "'rank.test.query'", ")", ")", "return", "x_train", ",", "y_train", ",", "x_test", ",", "y_test", ",", "q_train", ",", "q_test" ]
Ranking datasets from the lightgbm repository.
[ "Ranking", "datasets", "from", "the", "lightgbm", "repository", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/datasets.py#L234-L242
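A usage sketch; q_train and q_test hold per-query group sizes, so they should partition the rows:

import shap

x_train, y_train, x_test, y_test, q_train, q_test = shap.datasets.rank()
print(int(q_train.sum()) == x_train.shape[0])  # True -- group sizes sum to the row count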
24,190
slundberg/shap
shap/benchmark/measures.py
batch_remove_retrain
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric): """ An approximation of holdout that only retrains the model once. This is also called ROAR (RemOve And Retrain) in work by Google. It is much more computationally efficient than the holdout method because it masks the most important features in every sample and then retrains the model once, instead of retraining the model for every test sample like the holdout metric. """ warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!") X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # mask nmask top features for each explanation X_train_tmp = X_train.copy() X_train_mean = X_train.mean(0) tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6 for i in range(len(y_train)): if nmask_train[i] > 0: ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise) X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]] X_test_tmp = X_test.copy() for i in range(len(y_test)): if nmask_test[i] > 0: ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise) X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]] # train the model with all the given features masked model_masked = model_generator() model_masked.fit(X_train_tmp, y_train) yp_test_masked = model_masked.predict(X_test_tmp) return metric(y_test, yp_test_masked)
python
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric): """ An approximation of holdout that only retrains the model once. This is also called ROAR (RemOve And Retrain) in work by Google. It is much more computationally efficient than the holdout method because it masks the most important features in every sample and then retrains the model once, instead of retraining the model for every test sample like the holdout metric. """ warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!") X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # mask nmask top features for each explanation X_train_tmp = X_train.copy() X_train_mean = X_train.mean(0) tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6 for i in range(len(y_train)): if nmask_train[i] > 0: ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise) X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]] X_test_tmp = X_test.copy() for i in range(len(y_test)): if nmask_test[i] > 0: ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise) X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]] # train the model with all the given features masked model_masked = model_generator() model_masked.fit(X_train_tmp, y_train) yp_test_masked = model_masked.predict(X_test_tmp) return metric(y_test, yp_test_masked)
[ "def", "batch_remove_retrain", "(", "nmask_train", ",", "nmask_test", ",", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_train", ",", "attr_test", ",", "model_generator", ",", "metric", ")", ":", "warnings", ".", "warn", "(", "\"The retrain based measures can incorrectly evaluate models in some cases!\"", ")", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# mask nmask top features for each explanation", "X_train_tmp", "=", "X_train", ".", "copy", "(", ")", "X_train_mean", "=", "X_train", ".", "mean", "(", "0", ")", "tie_breaking_noise", "=", "const_rand", "(", "X_train", ".", "shape", "[", "1", "]", ")", "*", "1e-6", "for", "i", "in", "range", "(", "len", "(", "y_train", ")", ")", ":", "if", "nmask_train", "[", "i", "]", ">", "0", ":", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_train", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "X_train_tmp", "[", "i", ",", "ordering", "[", ":", "nmask_train", "[", "i", "]", "]", "]", "=", "X_train_mean", "[", "ordering", "[", ":", "nmask_train", "[", "i", "]", "]", "]", "X_test_tmp", "=", "X_test", ".", "copy", "(", ")", "for", "i", "in", "range", "(", "len", "(", "y_test", ")", ")", ":", "if", "nmask_test", "[", "i", "]", ">", "0", ":", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_test", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "X_test_tmp", "[", "i", ",", "ordering", "[", ":", "nmask_test", "[", "i", "]", "]", "]", "=", "X_train_mean", "[", "ordering", "[", ":", "nmask_test", "[", "i", "]", "]", "]", "# train the model with all the given features masked", "model_masked", "=", "model_generator", "(", ")", "model_masked", ".", "fit", "(", "X_train_tmp", ",", "y_train", ")", "yp_test_masked", "=", "model_masked", ".", "predict", "(", "X_test_tmp", ")", "return", "metric", "(", "y_test", ",", "yp_test_masked", ")" ]
An approximation of holdout that only retrains the model once. This is also called ROAR (RemOve And Retrain) in work by Google. It is much more computationally efficient than the holdout method because it masks the most important features in every sample and then retrains the model once, instead of retraining the model for every test sample like the holdout metric.
[ "An", "approximation", "of", "holdout", "that", "only", "retrains", "the", "model", "once", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L158-L193
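A hedged end-to-end sketch on synthetic data; the import path matches this file, but the data, the stand-in attributions, and the model choice are all assumptions for illustration:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from shap.benchmark.measures import batch_remove_retrain

rs = np.random.RandomState(0)
X_train, X_test = rs.randn(80, 5), rs.randn(20, 5)
y_train, y_test = X_train[:, 0], X_test[:, 0]            # only feature 0 matters
attr_train, attr_test = np.abs(X_train), np.abs(X_test)  # stand-in attributions
nmask_train = np.ones(80, dtype=int)                     # mask each sample's top feature
nmask_test = np.ones(20, dtype=int)
score = batch_remove_retrain(nmask_train, nmask_test, X_train, y_train,
                             X_test, y_test, attr_train, attr_test,
                             lambda: LinearRegression(), mean_squared_error)
print(score)  # MSE after masking the top feature everywhere and retraining once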
24,191
slundberg/shap
shap/benchmark/measures.py
keep_retrain
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is retrained for each test sample with the non-important features set to a constant. If you want to know how important a set of features is you can ask how the model would be different if only those features had existed. To determine this we can mask the other features across the entire training and test datasets, then retrain the model. If we then compare the output of this retrained model to the original model we can see the effect produced by only knowing the important features. Since for individualized explanation methods each test sample has a different set of most important features we need to retrain the model for every test sample to get the change in model performance when a specified fraction of the most important features are retained. """ warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!") # see if we match the last cached call global _keep_cache args = (X_train, y_train, X_test, y_test, model_generator, metric) cache_match = False if "args" in _keep_cache: if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test): cache_match = True X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # this is the model we will retrain many times model_masked = model_generator() # keep nkeep top features and re-train the model for each test explanation X_train_tmp = np.zeros(X_train.shape) X_test_tmp = np.zeros(X_test.shape) yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6 last_nkeep = _keep_cache.get("nkeep", None) last_yp_masked_test = _keep_cache.get("yp_masked_test", None) for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"): if cache_match and last_nkeep[i] == nkeep[i]: yp_masked_test[i] = last_yp_masked_test[i] elif nkeep[i] == attr_test.shape[1]: yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0] else: # mask out the most important features for this test instance X_train_tmp[:] = X_train X_test_tmp[:] = X_test ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean() X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean() # retrain the model and make a prediction model_masked.fit(X_train_tmp, y_train) yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0] # save our results so the next call to us can be faster when there is redundancy _keep_cache["nkeep"] = nkeep _keep_cache["yp_masked_test"] = yp_masked_test _keep_cache["attr_test"] = attr_test _keep_cache["args"] = args return metric(y_test, yp_masked_test)
python
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is retrained for each test sample with the non-important features set to a constant. If you want to know how important a set of features is you can ask how the model would be different if only those features had existed. To determine this we can mask the other features across the entire training and test datasets, then retrain the model. If we then compare the output of this retrained model to the original model we can see the effect produced by only knowing the important features. Since for individualized explanation methods each test sample has a different set of most important features we need to retrain the model for every test sample to get the change in model performance when a specified fraction of the most important features are retained. """ warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!") # see if we match the last cached call global _keep_cache args = (X_train, y_train, X_test, y_test, model_generator, metric) cache_match = False if "args" in _keep_cache: if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test): cache_match = True X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # this is the model we will retrain many times model_masked = model_generator() # keep nkeep top features and re-train the model for each test explanation X_train_tmp = np.zeros(X_train.shape) X_test_tmp = np.zeros(X_test.shape) yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6 last_nkeep = _keep_cache.get("nkeep", None) last_yp_masked_test = _keep_cache.get("yp_masked_test", None) for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"): if cache_match and last_nkeep[i] == nkeep[i]: yp_masked_test[i] = last_yp_masked_test[i] elif nkeep[i] == attr_test.shape[1]: yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0] else: # mask out the most important features for this test instance X_train_tmp[:] = X_train X_test_tmp[:] = X_test ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean() X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean() # retrain the model and make a prediction model_masked.fit(X_train_tmp, y_train) yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0] # save our results so the next call to us can be faster when there is redundancy _keep_cache["nkeep"] = nkeep _keep_cache["yp_masked_test"] = yp_masked_test _keep_cache["attr_test"] = attr_test _keep_cache["args"] = args return metric(y_test, yp_masked_test)
[ "def", "keep_retrain", "(", "nkeep", ",", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_test", ",", "model_generator", ",", "metric", ",", "trained_model", ",", "random_state", ")", ":", "warnings", ".", "warn", "(", "\"The retrain based measures can incorrectly evaluate models in some cases!\"", ")", "# see if we match the last cached call", "global", "_keep_cache", "args", "=", "(", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "model_generator", ",", "metric", ")", "cache_match", "=", "False", "if", "\"args\"", "in", "_keep_cache", ":", "if", "all", "(", "a", "is", "b", "for", "a", ",", "b", "in", "zip", "(", "_keep_cache", "[", "\"args\"", "]", ",", "args", ")", ")", "and", "np", ".", "all", "(", "_keep_cache", "[", "\"attr_test\"", "]", "==", "attr_test", ")", ":", "cache_match", "=", "True", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# this is the model we will retrain many times", "model_masked", "=", "model_generator", "(", ")", "# keep nkeep top features and re-train the model for each test explanation", "X_train_tmp", "=", "np", ".", "zeros", "(", "X_train", ".", "shape", ")", "X_test_tmp", "=", "np", ".", "zeros", "(", "X_test", ".", "shape", ")", "yp_masked_test", "=", "np", ".", "zeros", "(", "y_test", ".", "shape", ")", "tie_breaking_noise", "=", "const_rand", "(", "X_train", ".", "shape", "[", "1", "]", ")", "*", "1e-6", "last_nkeep", "=", "_keep_cache", ".", "get", "(", "\"nkeep\"", ",", "None", ")", "last_yp_masked_test", "=", "_keep_cache", ".", "get", "(", "\"yp_masked_test\"", ",", "None", ")", "for", "i", "in", "tqdm", "(", "range", "(", "len", "(", "y_test", ")", ")", ",", "\"Retraining for the 'keep' metric\"", ")", ":", "if", "cache_match", "and", "last_nkeep", "[", "i", "]", "==", "nkeep", "[", "i", "]", ":", "yp_masked_test", "[", "i", "]", "=", "last_yp_masked_test", "[", "i", "]", "elif", "nkeep", "[", "i", "]", "==", "attr_test", ".", "shape", "[", "1", "]", ":", "yp_masked_test", "[", "i", "]", "=", "trained_model", ".", "predict", "(", "X_test", "[", "i", ":", "i", "+", "1", "]", ")", "[", "0", "]", "else", ":", "# mask out the most important features for this test instance", "X_train_tmp", "[", ":", "]", "=", "X_train", "X_test_tmp", "[", ":", "]", "=", "X_test", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_test", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "X_train_tmp", "[", ":", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "=", "X_train", "[", ":", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", ".", "mean", "(", ")", "X_test_tmp", "[", "i", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "=", "X_train", "[", ":", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", ".", "mean", "(", ")", "# retrain the model and make a prediction", "model_masked", ".", "fit", "(", "X_train_tmp", ",", "y_train", ")", "yp_masked_test", "[", "i", "]", "=", "model_masked", ".", "predict", "(", "X_test_tmp", "[", "i", ":", "i", "+", "1", "]", ")", "[", "0", "]", "# save our results so the next call to us can be faster when there is redundancy", "_keep_cache", "[", "\"nkeep\"", "]", "=", "nkeep", "_keep_cache", "[", "\"yp_masked_test\"", "]", "=", "yp_masked_test", "_keep_cache", "[", "\"attr_test\"", "]", "=", "attr_test", "_keep_cache", "[", "\"args\"", "]", "=", "args", "return", "metric", "(", 
"y_test", ",", "yp_masked_test", ")" ]
The model is retrained for each test sample with the non-important features set to a constant. If you want to know how important a set of features is you can ask how the model would be different if only those features had existed. To determine this we can mask the other features across the entire training and test datasets, then retrain the model. If we then compare the output of this retrained model to the original model we can see the effect produced by only knowing the important features. Since for individualized explanation methods each test sample has a different set of most important features we need to retrain the model for every test sample to get the change in model performance when a specified fraction of the most important features are retained.
[ "The", "model", "is", "retrained", "for", "each", "test", "sample", "with", "the", "non", "-", "important", "features", "set", "to", "a", "constant", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L196-L258
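A minimal sketch of the keep-and-retrain loop above, on synthetic data with scikit-learn. The toy dataset, the stand-in attributions, and the fixed nkeep are all illustrative assumptions, and the dropped features are flattened to per-feature training means (a slight simplification of the scalar mean the record uses):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X_train = rng.randn(200, 5)
y_train = X_train @ np.array([3.0, 2.0, 1.0, 0.0, 0.0]) + 0.1 * rng.randn(200)
X_test = rng.randn(10, 5)
attr_test = np.abs(X_test)  # stand-in attributions; real ones come from an explainer
nkeep = 2                   # keep the top-2 features per test sample

yp_masked = np.zeros(len(X_test))
mean_vals = X_train.mean(0)
for i in range(len(X_test)):
    order = np.argsort(-attr_test[i])      # most important features first
    drop = order[nkeep:]                   # everything we do not keep
    X_tr = X_train.copy()
    X_tr[:, drop] = mean_vals[drop]        # flatten dropped features to a constant
    X_te = X_test[i:i + 1].copy()
    X_te[:, drop] = mean_vals[drop]
    model = LinearRegression().fit(X_tr, y_train)  # retrain per test sample
    yp_masked[i] = model.predict(X_te)[0]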
24,192
slundberg/shap
shap/benchmark/measures.py
keep_mask
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to their mean. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features for each test explanation X_test_tmp = X_test.copy() yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6 mean_vals = X_train.mean(0) for i in range(len(y_test)): if nkeep[i] < X_test.shape[1]: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]] yp_masked_test = trained_model.predict(X_test_tmp) return metric(y_test, yp_masked_test)
python
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to their mean. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features for each test explanation X_test_tmp = X_test.copy() yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6 mean_vals = X_train.mean(0) for i in range(len(y_test)): if nkeep[i] < X_test.shape[1]: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]] yp_masked_test = trained_model.predict(X_test_tmp) return metric(y_test, yp_masked_test)
[ "def", "keep_mask", "(", "nkeep", ",", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_test", ",", "model_generator", ",", "metric", ",", "trained_model", ",", "random_state", ")", ":", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# keep nkeep top features for each test explanation", "X_test_tmp", "=", "X_test", ".", "copy", "(", ")", "yp_masked_test", "=", "np", ".", "zeros", "(", "y_test", ".", "shape", ")", "tie_breaking_noise", "=", "const_rand", "(", "X_train", ".", "shape", "[", "1", "]", ",", "random_state", ")", "*", "1e-6", "mean_vals", "=", "X_train", ".", "mean", "(", "0", ")", "for", "i", "in", "range", "(", "len", "(", "y_test", ")", ")", ":", "if", "nkeep", "[", "i", "]", "<", "X_test", ".", "shape", "[", "1", "]", ":", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_test", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "X_test_tmp", "[", "i", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "=", "mean_vals", "[", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "yp_masked_test", "=", "trained_model", ".", "predict", "(", "X_test_tmp", ")", "return", "metric", "(", "y_test", ",", "yp_masked_test", ")" ]
The model is re-evaluated for each test sample with the non-important features set to their mean.
[ "The", "model", "is", "revaluated", "for", "each", "test", "sample", "with", "the", "non", "-", "important", "features", "set", "to", "their", "mean", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L260-L281
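For contrast with the retrain variant, keep_mask only edits the test matrix and reuses the already-trained model, so the whole metric reduces to one batched predict call. A sketch under the same kind of toy setup (all names and sizes are illustrative assumptions):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X_train = rng.randn(200, 5)
y_train = X_train @ np.array([3.0, 2.0, 1.0, 0.0, 0.0])
X_test = rng.randn(10, 5)
attr_test = np.abs(X_test)                        # stand-in attributions
nkeep = 2
model = LinearRegression().fit(X_train, y_train)  # trained once, never retrained

X_masked = X_test.copy()
mean_vals = X_train.mean(0)
for i in range(len(X_test)):
    drop = np.argsort(-attr_test[i])[nkeep:]
    X_masked[i, drop] = mean_vals[drop]           # non-kept features -> training mean
yp_masked = model.predict(X_masked)               # one batched evaluation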
24,193
slundberg/shap
shap/benchmark/measures.py
keep_impute
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to an imputed value. Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on being able to estimate the full data covariance matrix (and inverse) accurately. So X_train.shape[0] should be significantly bigger than X_train.shape[1]. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features for each test explanation C = np.cov(X_train.T) C += np.eye(C.shape[0]) * 1e-6 X_test_tmp = X_test.copy() yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6 mean_vals = X_train.mean(0) for i in range(len(y_test)): if nkeep[i] < X_test.shape[1]: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) observe_inds = ordering[:nkeep[i]] impute_inds = ordering[nkeep[i]:] # impute missing data assuming it follows a multivariate normal distribution Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds]) Cio = C[impute_inds,:][:,observe_inds] impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds]) X_test_tmp[i, impute_inds] = impute yp_masked_test = trained_model.predict(X_test_tmp) return metric(y_test, yp_masked_test)
python
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to an imputed value. Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on being able to estimate the full data covariance matrix (and inverse) accurately. So X_train.shape[0] should be significantly bigger than X_train.shape[1]. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features for each test explanation C = np.cov(X_train.T) C += np.eye(C.shape[0]) * 1e-6 X_test_tmp = X_test.copy() yp_masked_test = np.zeros(y_test.shape) tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6 mean_vals = X_train.mean(0) for i in range(len(y_test)): if nkeep[i] < X_test.shape[1]: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) observe_inds = ordering[:nkeep[i]] impute_inds = ordering[nkeep[i]:] # impute missing data assuming it follows a multivariate normal distribution Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds]) Cio = C[impute_inds,:][:,observe_inds] impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds]) X_test_tmp[i, impute_inds] = impute yp_masked_test = trained_model.predict(X_test_tmp) return metric(y_test, yp_masked_test)
[ "def", "keep_impute", "(", "nkeep", ",", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_test", ",", "model_generator", ",", "metric", ",", "trained_model", ",", "random_state", ")", ":", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# keep nkeep top features for each test explanation", "C", "=", "np", ".", "cov", "(", "X_train", ".", "T", ")", "C", "+=", "np", ".", "eye", "(", "C", ".", "shape", "[", "0", "]", ")", "*", "1e-6", "X_test_tmp", "=", "X_test", ".", "copy", "(", ")", "yp_masked_test", "=", "np", ".", "zeros", "(", "y_test", ".", "shape", ")", "tie_breaking_noise", "=", "const_rand", "(", "X_train", ".", "shape", "[", "1", "]", ",", "random_state", ")", "*", "1e-6", "mean_vals", "=", "X_train", ".", "mean", "(", "0", ")", "for", "i", "in", "range", "(", "len", "(", "y_test", ")", ")", ":", "if", "nkeep", "[", "i", "]", "<", "X_test", ".", "shape", "[", "1", "]", ":", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_test", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "observe_inds", "=", "ordering", "[", ":", "nkeep", "[", "i", "]", "]", "impute_inds", "=", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "# impute missing data assuming it follows a multivariate normal distribution", "Coo_inv", "=", "np", ".", "linalg", ".", "inv", "(", "C", "[", "observe_inds", ",", ":", "]", "[", ":", ",", "observe_inds", "]", ")", "Cio", "=", "C", "[", "impute_inds", ",", ":", "]", "[", ":", ",", "observe_inds", "]", "impute", "=", "mean_vals", "[", "impute_inds", "]", "+", "Cio", "@", "Coo_inv", "@", "(", "X_test", "[", "i", ",", "observe_inds", "]", "-", "mean_vals", "[", "observe_inds", "]", ")", "X_test_tmp", "[", "i", ",", "impute_inds", "]", "=", "impute", "yp_masked_test", "=", "trained_model", ".", "predict", "(", "X_test_tmp", ")", "return", "metric", "(", "y_test", ",", "yp_masked_test", ")" ]
The model is re-evaluated for each test sample with the non-important features set to an imputed value. Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on being able to estimate the full data covariance matrix (and inverse) accurately. So X_train.shape[0] should be significantly bigger than X_train.shape[1].
[ "The", "model", "is", "revaluated", "for", "each", "test", "sample", "with", "the", "non", "-", "important", "features", "set", "to", "an", "imputed", "value", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L283-L318
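The imputation step above is the standard conditional mean of a multivariate normal: partitioning the mean mu and covariance C into observed (o) and imputed (i) blocks gives E[x_i | x_o] = mu_i + C_io C_oo^{-1} (x_o - mu_o). A sketch of just that step on toy data (an assumption), using np.linalg.solve rather than an explicit inverse:

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.randn(500, 4)
x = rng.randn(4)              # one test row
observe = np.array([0, 2])    # feature indices we keep
impute = np.array([1, 3])     # feature indices to fill in

mu = X_train.mean(0)
C = np.cov(X_train.T) + 1e-6 * np.eye(4)  # same ridge term as above keeps C_oo invertible
Coo = C[np.ix_(observe, observe)]
Cio = C[np.ix_(impute, observe)]
# E[x_i | x_o] = mu_i + C_io C_oo^{-1} (x_o - mu_o)
x_imputed = mu[impute] + Cio @ np.linalg.solve(Coo, x[observe] - mu[observe])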
24,194
slundberg/shap
shap/benchmark/measures.py
keep_resample
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to resampled background values. """ # why broken? overwriting? X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # how many samples to take nsamples = 100 # keep nkeep top features for each test explanation N,M = X_test.shape X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M) tie_breaking_noise = const_rand(M) * 1e-6 inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state) for i in range(N): if nkeep[i] < M: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]] yp_masked_test = trained_model.predict(X_test_tmp) yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples return metric(y_test, yp_masked_test)
python
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state): """ The model is re-evaluated for each test sample with the non-important features set to resampled background values. """ # why broken? overwriting? X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # how many samples to take nsamples = 100 # keep nkeep top features for each test explanation N,M = X_test.shape X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M) tie_breaking_noise = const_rand(M) * 1e-6 inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state) for i in range(N): if nkeep[i] < M: ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise) X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]] yp_masked_test = trained_model.predict(X_test_tmp) yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples return metric(y_test, yp_masked_test)
[ "def", "keep_resample", "(", "nkeep", ",", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_test", ",", "model_generator", ",", "metric", ",", "trained_model", ",", "random_state", ")", ":", "# why broken? overwriting?", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# how many samples to take", "nsamples", "=", "100", "# keep nkeep top features for each test explanation", "N", ",", "M", "=", "X_test", ".", "shape", "X_test_tmp", "=", "np", ".", "tile", "(", "X_test", ",", "[", "1", ",", "nsamples", "]", ")", ".", "reshape", "(", "nsamples", "*", "N", ",", "M", ")", "tie_breaking_noise", "=", "const_rand", "(", "M", ")", "*", "1e-6", "inds", "=", "sklearn", ".", "utils", ".", "resample", "(", "np", ".", "arange", "(", "N", ")", ",", "n_samples", "=", "nsamples", ",", "random_state", "=", "random_state", ")", "for", "i", "in", "range", "(", "N", ")", ":", "if", "nkeep", "[", "i", "]", "<", "M", ":", "ordering", "=", "np", ".", "argsort", "(", "-", "attr_test", "[", "i", ",", ":", "]", "+", "tie_breaking_noise", ")", "X_test_tmp", "[", "i", "*", "nsamples", ":", "(", "i", "+", "1", ")", "*", "nsamples", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "=", "X_train", "[", "inds", ",", ":", "]", "[", ":", ",", "ordering", "[", "nkeep", "[", "i", "]", ":", "]", "]", "yp_masked_test", "=", "trained_model", ".", "predict", "(", "X_test_tmp", ")", "yp_masked_test", "=", "np", ".", "reshape", "(", "yp_masked_test", ",", "(", "N", ",", "nsamples", ")", ")", ".", "mean", "(", "1", ")", "# take the mean output over all samples", "return", "metric", "(", "y_test", ",", "yp_masked_test", ")" ]
The model is re-evaluated for each test sample with the non-important features set to resampled background values.
[ "The", "model", "is", "revaluated", "for", "each", "test", "sample", "with", "the", "non", "-", "important", "features", "set", "to", "resample", "background", "values", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L320-L345
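The tile-and-average trick above turns the Monte Carlo estimate into a single batched predict: every test row is repeated nsamples times, the non-kept features are overwritten with resampled training rows, and the nsamples predictions are averaged back down per row. A small sketch of just the reshaping, with illustrative sizes and a sum standing in for the model:

import numpy as np

rng = np.random.RandomState(0)
N, M, nsamples = 3, 4, 5
X_test = rng.randn(N, M)
X_train = rng.randn(50, M)

X_rep = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)  # rows i*nsamples..(i+1)*nsamples-1 copy test row i
inds = rng.choice(len(X_train), nsamples)  # background rows to splice in
drop = np.array([2, 3])                    # features being replaced (chosen per-row in the real code)
for i in range(N):
    X_rep[i * nsamples:(i + 1) * nsamples, drop] = X_train[inds][:, drop]

yp = X_rep.sum(1)                          # stand-in for trained_model.predict
yp_mean = yp.reshape(N, nsamples).mean(1)  # average the samples per test row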
24,195
slundberg/shap
shap/benchmark/measures.py
local_accuracy
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model): """ Measures how well the features plus a constant base rate sum up to the model output. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features and re-train the model for each test explanation yp_test = trained_model.predict(X_test) return metric(yp_test, strip_list(attr_test).sum(1))
python
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model): """ Measures how well the features plus a constant base rate sum up to the model output. """ X_train, X_test = to_array(X_train, X_test) # how many features to mask assert X_train.shape[1] == X_test.shape[1] # keep nkeep top features and re-train the model for each test explanation yp_test = trained_model.predict(X_test) return metric(yp_test, strip_list(attr_test).sum(1))
[ "def", "local_accuracy", "(", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", ",", "attr_test", ",", "model_generator", ",", "metric", ",", "trained_model", ")", ":", "X_train", ",", "X_test", "=", "to_array", "(", "X_train", ",", "X_test", ")", "# how many features to mask", "assert", "X_train", ".", "shape", "[", "1", "]", "==", "X_test", ".", "shape", "[", "1", "]", "# keep nkeep top features and re-train the model for each test explanation", "yp_test", "=", "trained_model", ".", "predict", "(", "X_test", ")", "return", "metric", "(", "yp_test", ",", "strip_list", "(", "attr_test", ")", ".", "sum", "(", "1", ")", ")" ]
Measures how well the features plus a constant base rate sum up to the model output.
[ "The", "how", "well", "do", "the", "features", "plus", "a", "constant", "base", "rate", "sum", "up", "to", "the", "model", "output", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L384-L396
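Local accuracy is exact for a linear model, where per-feature attributions of coef_j * (x_j - mean_j) plus the prediction at the training mean reconstruct the output to machine precision; the attribution formula below is that linear special case (an assumption, not what the benchmark itself computes):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([2.0, -1.0, 0.5]) + 4.0
model = LinearRegression().fit(X, y)

base = model.predict(X.mean(0, keepdims=True))[0]        # constant base rate
phi = model.coef_ * (X - X.mean(0))                      # per-feature attributions
assert np.allclose(base + phi.sum(1), model.predict(X))  # local accuracy holds exactly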
24,196
slundberg/shap
shap/benchmark/measures.py
const_rand
def const_rand(size, seed=23980): """ Generate a random array with a fixed seed. """ old_state = np.random.get_state() np.random.seed(seed) out = np.random.rand(size) np.random.set_state(old_state) return out
python
def const_rand(size, seed=23980): """ Generate a random array with a fixed seed. """ old_state = np.random.get_state() np.random.seed(seed) out = np.random.rand(size) np.random.set_state(old_state) return out
[ "def", "const_rand", "(", "size", ",", "seed", "=", "23980", ")", ":", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "seed", ")", "out", "=", "np", ".", "random", ".", "rand", "(", "size", ")", "np", ".", "random", ".", "seed", "(", "old_seed", ")", "return", "out" ]
Generate a random array with a fixed seed.
[ "Generate", "a", "random", "array", "with", "a", "fixed", "seed", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L401-L408
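Note the save/restore above has to go through np.random.get_state()/np.random.set_state(): calling np.random.seed() with no argument returns None rather than the current seed, so saving its return value and re-seeding with it would scramble the global stream instead of restoring it (the bug fixed in the record above). A sketch of an alternative that sidesteps global state entirely by drawing from a local generator (the function name here is illustrative):

import numpy as np

def const_rand_local(size, seed=23980):
    # a dedicated RandomState never touches np.random's global stream
    return np.random.RandomState(seed).rand(size)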
24,197
slundberg/shap
shap/benchmark/measures.py
const_shuffle
def const_shuffle(arr, seed=23980): """ Shuffle an array in-place with a fixed seed. """ old_state = np.random.get_state() np.random.seed(seed) np.random.shuffle(arr) np.random.set_state(old_state)
python
def const_shuffle(arr, seed=23980): """ Shuffle an array in-place with a fixed seed. """ old_state = np.random.get_state() np.random.seed(seed) np.random.shuffle(arr) np.random.set_state(old_state)
[ "def", "const_shuffle", "(", "arr", ",", "seed", "=", "23980", ")", ":", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "seed", ")", "np", ".", "random", ".", "shuffle", "(", "arr", ")", "np", ".", "random", ".", "seed", "(", "old_seed", ")" ]
Shuffle an array in-place with a fixed seed.
[ "Shuffle", "an", "array", "in", "-", "place", "with", "a", "fixed", "seed", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/measures.py#L410-L416
24,198
slundberg/shap
shap/common.py
hclust_ordering
def hclust_ordering(X, metric="sqeuclidean"): """ A leaf ordering is under-defined; this picks the ordering that keeps nearby samples similar. """ # compute a hierarchical clustering D = sp.spatial.distance.pdist(X, metric) cluster_matrix = sp.cluster.hierarchy.complete(D) # merge clusters, rotating them to make the end points match as best we can sets = [[i] for i in range(X.shape[0])] for i in range(cluster_matrix.shape[0]): s1 = sets[int(cluster_matrix[i,0])] s2 = sets[int(cluster_matrix[i,1])] # compute distances between the end points of the lists d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0] d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0] d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0] d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0] # concatenate the lists in the way that minimizes the difference between # the samples at the junction best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r) if best == d_s1_s2: sets.append(s1 + s2) elif best == d_s2_s1: sets.append(s2 + s1) elif best == d_s1r_s2: sets.append(list(reversed(s1)) + s2) else: sets.append(s1 + list(reversed(s2))) return sets[-1]
python
def hclust_ordering(X, metric="sqeuclidean"): """ A leaf ordering is under-defined; this picks the ordering that keeps nearby samples similar. """ # compute a hierarchical clustering D = sp.spatial.distance.pdist(X, metric) cluster_matrix = sp.cluster.hierarchy.complete(D) # merge clusters, rotating them to make the end points match as best we can sets = [[i] for i in range(X.shape[0])] for i in range(cluster_matrix.shape[0]): s1 = sets[int(cluster_matrix[i,0])] s2 = sets[int(cluster_matrix[i,1])] # compute distances between the end points of the lists d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0] d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0] d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0] d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0] # concatenate the lists in the way that minimizes the difference between # the samples at the junction best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r) if best == d_s1_s2: sets.append(s1 + s2) elif best == d_s2_s1: sets.append(s2 + s1) elif best == d_s1r_s2: sets.append(list(reversed(s1)) + s2) else: sets.append(s1 + list(reversed(s2))) return sets[-1]
[ "def", "hclust_ordering", "(", "X", ",", "metric", "=", "\"sqeuclidean\"", ")", ":", "# compute a hierarchical clustering", "D", "=", "sp", ".", "spatial", ".", "distance", ".", "pdist", "(", "X", ",", "metric", ")", "cluster_matrix", "=", "sp", ".", "cluster", ".", "hierarchy", ".", "complete", "(", "D", ")", "# merge clusters, rotating them to make the end points match as best we can", "sets", "=", "[", "[", "i", "]", "for", "i", "in", "range", "(", "X", ".", "shape", "[", "0", "]", ")", "]", "for", "i", "in", "range", "(", "cluster_matrix", ".", "shape", "[", "0", "]", ")", ":", "s1", "=", "sets", "[", "int", "(", "cluster_matrix", "[", "i", ",", "0", "]", ")", "]", "s2", "=", "sets", "[", "int", "(", "cluster_matrix", "[", "i", ",", "1", "]", ")", "]", "# compute distances between the end points of the lists", "d_s1_s2", "=", "pdist", "(", "np", ".", "vstack", "(", "[", "X", "[", "s1", "[", "-", "1", "]", ",", ":", "]", ",", "X", "[", "s2", "[", "0", "]", ",", ":", "]", "]", ")", ",", "metric", ")", "[", "0", "]", "d_s2_s1", "=", "pdist", "(", "np", ".", "vstack", "(", "[", "X", "[", "s1", "[", "0", "]", ",", ":", "]", ",", "X", "[", "s2", "[", "-", "1", "]", ",", ":", "]", "]", ")", ",", "metric", ")", "[", "0", "]", "d_s1r_s2", "=", "pdist", "(", "np", ".", "vstack", "(", "[", "X", "[", "s1", "[", "0", "]", ",", ":", "]", ",", "X", "[", "s2", "[", "0", "]", ",", ":", "]", "]", ")", ",", "metric", ")", "[", "0", "]", "d_s1_s2r", "=", "pdist", "(", "np", ".", "vstack", "(", "[", "X", "[", "s1", "[", "-", "1", "]", ",", ":", "]", ",", "X", "[", "s2", "[", "-", "1", "]", ",", ":", "]", "]", ")", ",", "metric", ")", "[", "0", "]", "# concatenete the lists in the way the minimizes the difference between", "# the samples at the junction", "best", "=", "min", "(", "d_s1_s2", ",", "d_s2_s1", ",", "d_s1r_s2", ",", "d_s1_s2r", ")", "if", "best", "==", "d_s1_s2", ":", "sets", ".", "append", "(", "s1", "+", "s2", ")", "elif", "best", "==", "d_s2_s1", ":", "sets", ".", "append", "(", "s2", "+", "s1", ")", "elif", "best", "==", "d_s1r_s2", ":", "sets", ".", "append", "(", "list", "(", "reversed", "(", "s1", ")", ")", "+", "s2", ")", "else", ":", "sets", ".", "append", "(", "s1", "+", "list", "(", "reversed", "(", "s2", ")", ")", ")", "return", "sets", "[", "-", "1", "]" ]
A leaf ordering is under-defined; this picks the ordering that keeps nearby samples similar.
[ "A", "leaf", "ordering", "is", "under", "-", "defined", "this", "picks", "the", "ordering", "that", "keeps", "nearby", "samples", "similar", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/common.py#L215-L247
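Recent SciPy ships an exact solver for this leaf-ordering problem: scipy.cluster.hierarchy.optimal_leaf_ordering reorders a linkage matrix to minimize the distance between adjacent leaves, which the greedy endpoint matching above approximates. A sketch of that alternative on illustrative data:

import numpy as np
import scipy.spatial.distance
import scipy.cluster.hierarchy

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
D = scipy.spatial.distance.pdist(X, "sqeuclidean")
Z = scipy.cluster.hierarchy.complete(D)
Z = scipy.cluster.hierarchy.optimal_leaf_ordering(Z, D)  # exact, not greedy
order = scipy.cluster.hierarchy.leaves_list(Z)           # plays the role of sets[-1] above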
24,199
slundberg/shap
shap/common.py
approximate_interactions
def approximate_interactions(index, shap_values, X, feature_names=None): """ Order other features by how much interaction they seem to have with the feature at the given index. This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction index values for SHAP see the interaction_contribs option implemented in XGBoost. """ # convert from DataFrames if we got any if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"): if feature_names is None: feature_names = X.columns X = X.values index = convert_name(index, shap_values, feature_names) if X.shape[0] > 10000: a = np.arange(X.shape[0]) np.random.shuffle(a) inds = a[:10000] else: inds = np.arange(X.shape[0]) x = X[inds, index] srt = np.argsort(x) shap_ref = shap_values[inds, index] shap_ref = shap_ref[srt] inc = max(min(int(len(x) / 10.0), 50), 1) interactions = [] for i in range(X.shape[1]): val_other = X[inds, i][srt].astype(np.float) v = 0.0 if not (i == index or np.sum(np.abs(val_other)) < 1e-8): for j in range(0, len(x), inc): if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0: v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1]) val_v = v val_other = np.isnan(X[inds, i][srt].astype(np.float)) v = 0.0 if not (i == index or np.sum(np.abs(val_other)) < 1e-8): for j in range(0, len(x), inc): if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0: v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1]) nan_v = v interactions.append(max(val_v, nan_v)) return np.argsort(-np.abs(interactions))
python
def approximate_interactions(index, shap_values, X, feature_names=None): """ Order other features by how much interaction they seem to have with the feature at the given index. This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction index values for SHAP see the interaction_contribs option implemented in XGBoost. """ # convert from DataFrames if we got any if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"): if feature_names is None: feature_names = X.columns X = X.values index = convert_name(index, shap_values, feature_names) if X.shape[0] > 10000: a = np.arange(X.shape[0]) np.random.shuffle(a) inds = a[:10000] else: inds = np.arange(X.shape[0]) x = X[inds, index] srt = np.argsort(x) shap_ref = shap_values[inds, index] shap_ref = shap_ref[srt] inc = max(min(int(len(x) / 10.0), 50), 1) interactions = [] for i in range(X.shape[1]): val_other = X[inds, i][srt].astype(np.float) v = 0.0 if not (i == index or np.sum(np.abs(val_other)) < 1e-8): for j in range(0, len(x), inc): if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0: v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1]) val_v = v val_other = np.isnan(X[inds, i][srt].astype(np.float)) v = 0.0 if not (i == index or np.sum(np.abs(val_other)) < 1e-8): for j in range(0, len(x), inc): if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0: v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1]) nan_v = v interactions.append(max(val_v, nan_v)) return np.argsort(-np.abs(interactions))
[ "def", "approximate_interactions", "(", "index", ",", "shap_values", ",", "X", ",", "feature_names", "=", "None", ")", ":", "# convert from DataFrames if we got any", "if", "str", "(", "type", "(", "X", ")", ")", ".", "endswith", "(", "\"'pandas.core.frame.DataFrame'>\"", ")", ":", "if", "feature_names", "is", "None", ":", "feature_names", "=", "X", ".", "columns", "X", "=", "X", ".", "values", "index", "=", "convert_name", "(", "index", ",", "shap_values", ",", "feature_names", ")", "if", "X", ".", "shape", "[", "0", "]", ">", "10000", ":", "a", "=", "np", ".", "arange", "(", "X", ".", "shape", "[", "0", "]", ")", "np", ".", "random", ".", "shuffle", "(", "a", ")", "inds", "=", "a", "[", ":", "10000", "]", "else", ":", "inds", "=", "np", ".", "arange", "(", "X", ".", "shape", "[", "0", "]", ")", "x", "=", "X", "[", "inds", ",", "index", "]", "srt", "=", "np", ".", "argsort", "(", "x", ")", "shap_ref", "=", "shap_values", "[", "inds", ",", "index", "]", "shap_ref", "=", "shap_ref", "[", "srt", "]", "inc", "=", "max", "(", "min", "(", "int", "(", "len", "(", "x", ")", "/", "10.0", ")", ",", "50", ")", ",", "1", ")", "interactions", "=", "[", "]", "for", "i", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ":", "val_other", "=", "X", "[", "inds", ",", "i", "]", "[", "srt", "]", ".", "astype", "(", "np", ".", "float", ")", "v", "=", "0.0", "if", "not", "(", "i", "==", "index", "or", "np", ".", "sum", "(", "np", ".", "abs", "(", "val_other", ")", ")", "<", "1e-8", ")", ":", "for", "j", "in", "range", "(", "0", ",", "len", "(", "x", ")", ",", "inc", ")", ":", "if", "np", ".", "std", "(", "val_other", "[", "j", ":", "j", "+", "inc", "]", ")", ">", "0", "and", "np", ".", "std", "(", "shap_ref", "[", "j", ":", "j", "+", "inc", "]", ")", ">", "0", ":", "v", "+=", "abs", "(", "np", ".", "corrcoef", "(", "shap_ref", "[", "j", ":", "j", "+", "inc", "]", ",", "val_other", "[", "j", ":", "j", "+", "inc", "]", ")", "[", "0", ",", "1", "]", ")", "val_v", "=", "v", "val_other", "=", "np", ".", "isnan", "(", "X", "[", "inds", ",", "i", "]", "[", "srt", "]", ".", "astype", "(", "np", ".", "float", ")", ")", "v", "=", "0.0", "if", "not", "(", "i", "==", "index", "or", "np", ".", "sum", "(", "np", ".", "abs", "(", "val_other", ")", ")", "<", "1e-8", ")", ":", "for", "j", "in", "range", "(", "0", ",", "len", "(", "x", ")", ",", "inc", ")", ":", "if", "np", ".", "std", "(", "val_other", "[", "j", ":", "j", "+", "inc", "]", ")", ">", "0", "and", "np", ".", "std", "(", "shap_ref", "[", "j", ":", "j", "+", "inc", "]", ")", ">", "0", ":", "v", "+=", "abs", "(", "np", ".", "corrcoef", "(", "shap_ref", "[", "j", ":", "j", "+", "inc", "]", ",", "val_other", "[", "j", ":", "j", "+", "inc", "]", ")", "[", "0", ",", "1", "]", ")", "nan_v", "=", "v", "interactions", ".", "append", "(", "max", "(", "val_v", ",", "nan_v", ")", ")", "return", "np", ".", "argsort", "(", "-", "np", ".", "abs", "(", "interactions", ")", ")" ]
Order other features by how much interaction they seem to have with the feature at the given index. This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction index values for SHAP see the interaction_contribs option implemented in XGBoost.
[ "Order", "other", "features", "by", "how", "much", "interaction", "they", "seem", "to", "have", "with", "the", "feature", "at", "the", "given", "index", "." ]
b280cb81d498b9d98565cad8dd16fc88ae52649f
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/common.py#L271-L318
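The per-bin correlation idea above can be seen in isolation: sort the rows by the feature of interest, cut them into bins, and within each bin correlate that feature's SHAP values with another feature's values; strong within-bin correlation suggests an interaction. A toy sketch with made-up data and one fixed feature pair (both assumptions); note also that np.float is an alias removed in modern NumPy, so plain float is the safe spelling today:

import numpy as np

rng = np.random.RandomState(0)
n = 1000
x0, x1 = rng.randn(n), rng.randn(n)
shap0 = x0 * (1.0 + x1)             # x1 modulates x0's effect -> an interaction

srt = np.argsort(x0)
shap_ref, other = shap0[srt], x1[srt].astype(float)
inc = max(min(n // 10, 50), 1)      # same bin-size rule as the record above
score = 0.0
for j in range(0, n, inc):
    a, b = shap_ref[j:j + inc], other[j:j + inc]
    if a.std() > 0 and b.std() > 0:
        score += abs(np.corrcoef(a, b)[0, 1])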