desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'A container for layer\'s parameters. Parameters init : str, default \'glorot_uniform\'. The name of the weight initialization function. scale : float, default 0.5 bias : float, default 1.0 Initial values for bias. regularizers : dict Weight regularizers. >>> {\'W\' : L2()} constraints : dict Weight constraints. >>> {\...
def __init__(self, init='glorot_uniform', scale=0.5, bias=1.0, regularizers=None, constraints=None):
if (constraints is None): self.constraints = {} else: self.constraints = constraints if (regularizers is None): self.regularizers = {} else: self.regularizers = regularizers self.initial_bias = bias self.scale = scale self.init = get_initializer(init) self...
def init_grad(self):
    """Allocate a zero-filled gradient array for every weight that lacks one.

    Existing gradient entries are left untouched; only missing keys get a
    ``np.zeros_like`` buffer matching the corresponding weight array.
    """
    for name in self._params.keys():
        if name in self._grads:
            continue
        self._grads[name] = np.zeros_like(self._params[name])
def step(self, name, step):
    """Shift the named weight in place by *step*, then re-apply its constraint.

    The in-place ``+=`` preserves array identity for callers holding a
    reference to the weight; clipping only happens when a constraint is
    registered under *name*.
    """
    self._params[name] += step
    if name in self.constraints:
        clipped = self.constraints[name].clip(self._params[name])
        self._params[name] = clipped
def update_grad(self, name, value):
    """Replace the stored gradient for *name*.

    When a regularizer is registered for this weight, its penalty
    (computed from the current parameter value) is added on top.
    """
    self._grads[name] = value
    has_regularizer = name in self.regularizers
    if has_regularizer:
        penalty = self.regularizers[name](self._params[name])
        self._grads[name] += penalty
@property
def n_params(self):
    """Total number of scalar parameters held in this container."""
    total = 0
    for arr in self._params.values():
        total += np.prod(arr.shape)
    return total
def __init__(self, n_filters=8, filter_shape=(3, 3), padding=(0, 0), stride=(1, 1), parameters=None):
    """A 2D convolutional layer. Input shape: (n_images, n_channels, height, width).

    Parameters
    ----------
    n_filters : int, default 8
        The number of filters (kernels).
    filter_shape : tuple(int, int), default (3, 3)
        The shape of the filters (height, width).
    padding : tuple(int, int), default (0, 0)
    stride : tuple(int, int), default (1, 1)
    parameters : Parameters instance or None
        Weight container; a fresh ``Parameters()`` is created when None.
    """
    self.n_filters = n_filters
    self.filter_shape = filter_shape
    self.stride = stride
    self.padding = padding
    self._params = parameters if parameters is not None else Parameters()
def __init__(self, pool_shape=(2, 2), stride=(1, 1), padding=(0, 0)):
    """Max pooling layer. Input shape: (n_images, n_channels, height, width).

    Parameters
    ----------
    pool_shape : tuple(int, int), default (2, 2)
    stride : tuple(int, int), default (1, 1)
    padding : tuple(int, int), default (0, 0)
    """
    self.padding = padding
    self.stride = stride
    self.pool_shape = pool_shape
'Allocates initial weights.'
def setup(self, X_shape):
pass
def shape(self, x_shape):
    """Return the output shape of the current layer; subclasses must override."""
    raise NotImplementedError()
def __init__(self, output_dim, parameters=None):
    """A fully connected layer.

    Parameters
    ----------
    output_dim : int
        Size of the layer's output.
    parameters : Parameters instance or None
        Weight container; a fresh ``Parameters()`` is created when None.
    """
    self.output_dim = output_dim
    self.last_input = None
    self._params = parameters
    if parameters is None:
        self._params = Parameters()
'Naming convention: i : input gate f : forget gate c : cell o : output gate Parameters x_shape : np.array(batch size, time steps, input shape)'
def setup(self, x_shape):
self.input_dim = x_shape[2] W_params = ['W_i', 'W_f', 'W_o', 'W_c'] U_params = ['U_i', 'U_f', 'U_o', 'U_c'] b_params = ['b_i', 'b_f', 'b_o', 'b_c'] for param in W_params: self._params[param] = self._params.init((self.input_dim, self.hidden_dim)) for param in U_params: self._param...
'Parameters x_shape : np.array(batch size, time steps, input shape)'
def setup(self, x_shape):
self.input_dim = x_shape[2] self._params['W'] = self._params.init((self.input_dim, self.hidden_dim)) self._params['b'] = np.full((self.hidden_dim,), self._params.initial_bias) self._params['U'] = self.inner_init((self.hidden_dim, self.hidden_dim)) self._params.init_grad() self.hprev = np.zeros((...
'Initialize model\'s layers.'
def _setup_layers(self, x_shape):
x_shape = list(x_shape) x_shape[0] = self.batch_size for layer in self.layers: layer.setup(x_shape) x_shape = layer.shape(x_shape) self._n_layers = len(self.layers) self.optimizer.setup(self) self._initialized = True logging.info(('Total parameters: %s' % self.n_params)...
def _find_bprop_entry(self):
    """Find the entry layer index for back propagation.

    Returns -1 when the last layer has no ``parameters`` attribute
    (a non-parametric tail), otherwise the total layer count.
    """
    count = len(self.layers)
    if count > 0 and not hasattr(self.layers[-1], 'parameters'):
        return -1
    return count
def fprop(self, X):
    """Forward propagation: thread the input through every layer in order."""
    out = X
    for layer in self.layers:
        out = layer.forward_pass(out)
    return out
@property
def parameters(self):
    """List of the parameter containers of all parametric layers, in order."""
    return [layer.parameters for layer in self.parametric_layers]
'Calculate an error for given examples.'
def error(self, X=None, y=None):
training_phase = self.is_training if training_phase: self.is_training = False if ((X is None) and (y is None)): y_pred = self._predict(self.X) score = self.metric(self.y, y_pred) else: y_pred = self._predict(X) score = self.metric(y, y_pred) if training_phase:...
def shuffle_dataset(self):
    """Apply one random row permutation to X and y, keeping pairs aligned."""
    order = np.arange(self.X.shape[0])
    np.random.shuffle(order)
    self.X = self.X.take(order, axis=0)
    self.y = self.y.take(order, axis=0)
@property
def n_layers(self):
    """Number of layers in the model (cached during layer setup)."""
    return self._n_layers
@property
def n_params(self):
    """Total count of trainable parameters across all parametric layers."""
    total = 0
    for layer in self.parametric_layers:
        total += layer.parameters.n_params
    return total
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (i...
def __init__(self, package_name, package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None):
if (package_skip_patterns is None): package_skip_patterns = [u'\\.tests$'] if (module_skip_patterns is None): module_skip_patterns = [u'\\.setup$', u'\\._'] if class_skip_patterns: self.class_skip_patterns = class_skip_patterns else: self.class_skip_patterns = [] self...
def set_package_name(self, package_name):
    """Set the package name and resolve the package's root module and path."""
    self._package_name = package_name
    root = __import__(package_name)
    self.root_module = root
    self.root_path = root.__path__[0]
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI'
def _uri2path(self, uri):
if (uri == self.package_name): return os.path.join(self.root_path, u'__init__.py') path = uri.replace(u'.', os.path.sep) path = path.replace((self.package_name + os.path.sep), u'') path = os.path.join(self.root_path, path) if os.path.exists((path + u'.py')): path += u'.py' elif o...
def _path2uri(self, dirpath):
    """Convert a filesystem directory path to a dotted module URI."""
    rel = dirpath.replace(self.root_path, self.package_name)
    # Drop a single leading separator left over from the replacement.
    if rel[:1] == os.path.sep:
        rel = rel[1:]
    return rel.replace(os.path.sep, u'.')
def _parse_module(self, uri):
    """Parse the module at *uri*; return its (functions, classes) name lists."""
    filename = self._uri2path(uri)
    if filename is None:
        # Not a resolvable module: nothing to report.
        return ([], [])
    source = open(filename, u'rt')
    parsed = self._parse_lines(source, uri)
    source.close()
    return parsed
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource, module):
functions = [] classes = [] for line in linesource: if (line.startswith(u'def ') and line.count(u'(')): name = self._get_object_name(line) if (not name.startswith(u'_')): functions.append(name) elif line.startswith(u'class '): name = ...
'Check input and output specs in an uri Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns'
def test_specs(self, uri):
(_, classes) = self._parse_module(uri) if (not classes): return None uri_short = re.sub((u'^%s\\.' % self.package_name), u'', uri) allowed_keys = [u'desc', u'genfile', u'xor', u'requires', u'desc', u'nohash', u'argstr', u'position', u'mandatory', u'copyfile', u'usedefault', u'sep', u'hash_files'...
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'pack...
def _survives_exclude(self, matchstr, match_type):
if (match_type == u'module'): patterns = self.module_skip_patterns elif (match_type == u'package'): patterns = self.package_skip_patterns elif (match_type == u'class'): patterns = self.class_skip_patterns else: raise ValueError((u'Cannot interpret match type "...
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples'
def discover_modules(self):
modules = [self.package_name] for (dirpath, dirnames, filenames) in os.walk(self.root_path): root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: package_uri = u'.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self....
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default \'.rst\' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving ...
def __init__(self, package_name, rst_extension=u'.rst', package_skip_patterns=None, module_skip_patterns=None):
if (package_skip_patterns is None): package_skip_patterns = [u'\\.tests$'] if (module_skip_patterns is None): module_skip_patterns = [u'\\.setup$', u'\\._'] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self...
def set_package_name(self, package_name):
    """Point the writer at *package_name*; resets the written-module cache.

    >>> docwriter = ApiDocWriter('sphinx')
    >>> import sphinx
    >>> docwriter.root_path == sphinx.__path__[0]
    True
    """
    self._package_name = package_name
    module = __import__(package_name)
    self.root_module = module
    self.root_path = module.__path__[0]
    self.written_modules = None
def _get_object_name(self, line):
    """Return the identifier that follows ``def``/``class`` on *line*."""
    token = line.split()[1]
    name = token.partition(u'(')[0].strip()
    # "class Klass:" keeps its trailing colon after the split; drop it.
    return name.rstrip(u':')
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> modpath = sphinx...
def _uri2path(self, uri):
if (uri == self.package_name): return os.path.join(self.root_path, u'__init__.py') path = uri.replace(u'.', os.path.sep) path = path.replace((self.package_name + os.path.sep), u'') path = os.path.join(self.root_path, path) if os.path.exists((path + u'.py')): path += u'.py' elif o...
def _path2uri(self, dirpath):
    """Translate *dirpath* into a dotted module URI rooted at the package name."""
    uri = dirpath.replace(self.root_path, self.package_name)
    if uri.startswith(os.path.sep):
        uri = uri[len(os.path.sep):]
    return uri.replace(os.path.sep, u'.')
def _parse_module(self, uri):
    """Open the module file behind *uri* and scan it for functions and classes."""
    filename = self._uri2path(uri)
    if filename is None:
        return ([], [])
    fobj = open(filename, u'rt')
    result = self._parse_lines(fobj)
    fobj.close()
    return result
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource):
functions = [] classes = [] for line in linesource: if (line.startswith(u'def ') and line.count(u'(')): name = self._get_object_name(line) if (not name.startswith(u'_')): functions.append(name) elif line.startswith(u'class '): name = ...
'Make autodoc documentation template string for a module Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns S : string Contents of API doc'
def generate_api_doc(self, uri):
(functions, classes) = self._parse_module(uri) if ((not len(functions)) and (not len(classes))): print((u'WARNING: Empty -', uri)) return u'' uri_short = re.sub((u'^%s\\.' % self.package_name), u'', uri) ad = u'.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' chap_...
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'pack...
def _survives_exclude(self, matchstr, match_type):
if (match_type == u'module'): patterns = self.module_skip_patterns elif (match_type == u'package'): patterns = self.package_skip_patterns else: raise ValueError((u'Cannot interpret match type "%s"' % match_type)) L = len(self.package_name) if (matchstr[:L] == self...
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples >>> dw = ApiDocWriter(\'sphinx\') >>> mods = dw.discover_modules() >>> \'sphinx.util\' in mods True >>> dw.package_skip_patterns.append(\'\.util$\') >>> \'...
def discover_modules(self):
modules = [] for (dirpath, dirnames, filenames) in os.walk(self.root_path): root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: package_uri = u'.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self._survives_exclude...
def write_api_docs(self, outdir):
    """Generate API reST files into *outdir* (created if missing).

    Notes
    -----
    Module discovery is delegated to ``discover_modules`` and file writing
    to ``write_modules_api``.
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    self.write_modules_api(self.discover_modules(), outdir)
'Make a reST API index file from written files Parameters path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to \'gen\'. We add ``self.rst_extension``. relative_to : string...
def write_index(self, outdir, froot=u'gen', relative_to=None):
if (self.written_modules is None): raise ValueError(u'No modules written') path = os.path.join(outdir, (froot + self.rst_extension)) if (relative_to is not None): relpath = outdir.replace((relative_to + os.path.sep), u'') else: relpath = outdir idx = open(path, u'wt') ...
'Initialize package for parsing Parameters package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default \'.rst\' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving ...
def __init__(self, package_name, rst_extension=u'.rst', package_skip_patterns=None, module_skip_patterns=None, class_skip_patterns=None):
if (package_skip_patterns is None): package_skip_patterns = [u'\\.tests$'] if (module_skip_patterns is None): module_skip_patterns = [u'\\.setup$', u'\\._'] if class_skip_patterns: self.class_skip_patterns = class_skip_patterns else: self.class_skip_patterns = [] self...
def set_package_name(self, package_name):
    """Bind the writer to *package_name*, locate its root path, clear the cache.

    >>> docwriter = ApiDocWriter('sphinx')
    >>> import sphinx
    >>> docwriter.root_path == sphinx.__path__[0]
    True
    """
    self._package_name = package_name
    imported = __import__(package_name)
    self.root_module = imported
    self.root_path = imported.__path__[0]
    self.written_modules = None
def _get_object_name(self, line):
    """Extract the second token of *line* (a def/class name), minus args and ':'."""
    second = line.split()[1]
    bare = second.split(u'(', 1)[0].strip()
    return bare.rstrip(u':')
'Convert uri to absolute filepath Parameters uri : string URI of python module to return path for Returns path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples >>> docwriter = ApiDocWriter(\'sphinx\') >>> import sphinx >>> modpath = sphinx...
def _uri2path(self, uri):
if (uri == self.package_name): return os.path.join(self.root_path, u'__init__.py') path = uri.replace(u'.', os.path.sep) path = path.replace((self.package_name + os.path.sep), u'') path = os.path.join(self.root_path, path) if os.path.exists((path + u'.py')): path += u'.py' elif o...
def _path2uri(self, dirpath):
    """Map a directory path to its dotted-module URI equivalent."""
    swapped = dirpath.replace(self.root_path, self.package_name)
    leading_sep = swapped.startswith(os.path.sep)
    if leading_sep:
        swapped = swapped[1:]
    return swapped.replace(os.path.sep, u'.')
def _parse_module(self, uri):
    """Resolve *uri* to a file and collect its function/class names."""
    filename = self._uri2path(uri)
    if filename is None:
        return ([], [])
    stream = open(filename, u'rt')
    functions, classes = self._parse_lines(stream, uri)
    stream.close()
    return (functions, classes)
'Parse lines of text for functions and classes'
def _parse_lines(self, linesource, module):
functions = [] classes = [] for line in linesource: if (line.startswith(u'def ') and line.count(u'(')): name = self._get_object_name(line) if (not name.startswith(u'_')): functions.append(name) elif line.startswith(u'class '): name = ...
'Make autodoc documentation template string for a module Parameters uri : string python location of module - e.g \'sphinx.builder\' Returns S : string Contents of API doc'
def generate_api_doc(self, uri):
(functions, classes) = self._parse_module(uri) workflows = [] helper_functions = [] for function in functions: try: __import__(uri) finst = sys.modules[uri].__dict__[function] except TypeError: continue try: workflow = finst() ...
'Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples >>> dw = ApiDocWriter(\'sphinx\') >>> dw._survives_exclude(\'sphinx.okpkg\', \'package\') True >>> dw.package_skip_patterns.append(\'^\.badpkg$\') >>> dw._survives_exclude(\'sphinx.badpkg\', \'pack...
def _survives_exclude(self, matchstr, match_type):
if (match_type == u'module'): patterns = self.module_skip_patterns elif (match_type == u'package'): patterns = self.package_skip_patterns elif (match_type == u'class'): patterns = self.class_skip_patterns else: raise ValueError((u'Cannot interpret match type "...
'Return module sequence discovered from ``self.package_name`` Parameters None Returns mods : sequence Sequence of module names within ``self.package_name`` Examples >>> dw = ApiDocWriter(\'sphinx\') >>> mods = dw.discover_modules() >>> \'sphinx.util\' in mods True >>> dw.package_skip_patterns.append(\'\.util$\') >>> \'...
def discover_modules(self):
modules = [self.package_name] for (dirpath, dirnames, filenames) in os.walk(self.root_path): root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) for dirname in dirnames[:]: package_uri = u'.'.join((root_uri, dirname)) if (self._uri2path(package_uri) and self....
def write_api_docs(self, outdir):
    """Write API reST files for every discovered module into *outdir*.

    Creates *outdir* when absent; ``write_modules_api`` records the list of
    written modules on the instance.
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    discovered = self.discover_modules()
    self.write_modules_api(discovered, outdir)
'Make a reST API index file from written files Parameters path : string Filename to write index to outdir : string Directory to which to write generated index file froot : string, optional root (filename without extension) of filename to write to Defaults to \'gen\'. We add ``self.rst_extension``. relative_to : string...
def write_index(self, outdir, froot=u'gen', relative_to=None):
if (self.written_modules is None): raise ValueError(u'No modules written') path = os.path.join(outdir, (froot + self.rst_extension)) if (relative_to is not None): relpath = outdir.replace((relative_to + os.path.sep), u'') else: relpath = outdir idx = open(path, u'wt') ...
'Executes a pre-defined pipeline is distributed approaches based on IPython\'s ipyparallel processing interface'
def run(self, graph, config, updatehash=False):
try: name = u'ipyparallel' __import__(name) self.iparallel = sys.modules[name] except ImportError as e: raise_from(ImportError(u'ipyparallel not found. Parallel execution will be unavailable'), e) try: self.taskclient = self.iparallel.Client(**sel...
'Executes a pre-defined pipeline in a serial order. Parameters graph : networkx digraph defines order of execution'
def run(self, graph, config, updatehash=False):
if (not isinstance(graph, nx.DiGraph)): raise ValueError(u'Input must be a networkx digraph object') logger.info(u'Running serially.') old_wd = os.getcwd() notrun = [] donotrun = [] (nodes, _) = topological_sort(graph) for node in nodes: try: ...
'Initialize runtime attributes to none procs: list (N) of underlying interface elements to be processed proc_done: a boolean vector (N) signifying whether a process has been executed proc_pending: a boolean vector (N) signifying whether a process is currently running. Note: A process is finished only when both proc_don...
def __init__(self, plugin_args=None):
super(DistributedPluginBase, self).__init__(plugin_args=plugin_args) self.procs = None self.depidx = None self.refidx = None self.mapnodes = None self.mapnodesubids = None self.proc_done = None self.proc_pending = None self.max_jobs = np.inf if (plugin_args and (u'max_jobs' in pl...
'Executes a pre-defined pipeline using distributed approaches'
def run(self, graph, config, updatehash=False):
logger.info(u'Running in parallel.') self._config = config self._generate_dependency_list(graph) self.pending_tasks = [] self.readytorun = [] self.mapnodes = [] self.mapnodesubids = {} notrun = [] while (np.any((self.proc_done == False)) | np.any((self.proc_pending == True))): ...
'Sends jobs to workers'
def _send_procs_to_workers(self, updatehash=False, graph=None):
while np.any((self.proc_done == False)): num_jobs = len(self.pending_tasks) if np.isinf(self.max_jobs): slots = None else: slots = max(0, (self.max_jobs - num_jobs)) logger.debug((u'Slots available: %s' % slots)) if ((num_jobs >= self.max_jobs) o...
'Extract outputs and assign to inputs of dependent tasks This is called when a job is completed.'
def _task_finished_cb(self, jobid):
logger.info((u'[Job finished] jobname: %s jobid: %d' % (self.procs[jobid]._id, jobid))) if self._status_callback: self._status_callback(self.procs[jobid], u'end') self.proc_pending[jobid] = False rowview = self.depidx.getrowview(jobid) rowview[rowview.nonzero()] = 0 if (jo...
'Generates a dependency list for a list of graphs.'
def _generate_dependency_list(self, graph):
(self.procs, _) = topological_sort(graph) try: self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs, format=u'lil') except: self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs) self.refidx = deepcopy(self.depidx) self.refidx.astype = np.int self.proc_...
'Removes directories whose outputs have already been used up'
def _remove_node_dirs(self):
if str2bool(self._config[u'execution'][u'remove_node_directories']): for idx in np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]: if (idx in self.mapnodesubids): continue if (self.proc_done[idx] and (not self.proc_pending[idx])): self.refidx[(...
def _is_pending(self, taskid):
    """Check if a task is pending in the batch system.

    Abstract hook: scheduler-specific subclasses must report whether the
    job identified by *taskid* is still queued or running.
    """
    raise NotImplementedError
def _submit_batchtask(self, scriptfile, node):
    """Submit a task to the batch system.

    Abstract hook: scheduler-specific subclasses submit *scriptfile* on
    behalf of *node*; presumably they return a scheduler task id — confirm
    against concrete implementations.
    """
    raise NotImplementedError
'submit job and return taskid'
def _submit_job(self, node, updatehash=False):
pyscript = create_pyscript(node, updatehash=updatehash) (batch_dir, name) = os.path.split(pyscript) name = u'.'.join(name.split(u'.')[:(-1)]) batchscript = u'\n'.join((self._template, (u'%s %s' % (sys.executable, pyscript)))) batchscriptfile = os.path.join(batch_dir, (u'batchscript_%s.sh' % name)...
def _submit_graph(self, pyfiles, dependencies, nodes):
    """Submit a whole job graph to the batch system (abstract hook).

    pyfiles: list of files corresponding to a topological sort
    dependencies: dictionary of dependencies based on the topological sort
    """
    raise NotImplementedError
'This is more or less the _submit_batchtask from sge.py with flipped variable names, different command line switches, and different output formatting/processing'
def _submit_batchtask(self, scriptfile, node):
cmd = CommandLine(u'sbatch', environ=dict(os.environ), terminal_output=u'allatonce') path = os.path.dirname(scriptfile) sbatch_args = u'' if self._sbatch_args: sbatch_args = self._sbatch_args if (u'sbatch_args' in node.plugin_args): if ((u'overwrite' in node.plugin_args) and node.plu...
'Executes a pre-defined pipeline is distributed approaches based on IPython\'s ipyparallel processing interface'
def run(self, graph, config, updatehash=False):
try: name = u'IPython.kernel.client' __import__(name) self.ipyclient = sys.modules[name] except ImportError as e: raise_from(ImportError(u'Ipython kernel not found. Parallel execution will be unavailable'), e) try: self.taskclient = self.ipycli...
'Return True, unless job is in the "zombie" status'
def is_job_state_pending(self):
time_diff = (time.time() - self._job_info_creation_time) if self.is_zombie(): sge_debug_print(u"DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}".format(self)) is_pending_status = False elif (self.is_initializing() and (tim...
def __init__(self, qstat_instant_executable=u'qstat', qstat_cached_executable=u'qstat'):
    """Track batch jobs via qstat.

    :param qstat_instant_executable: command used for forced (instant) queries
    :param qstat_cached_executable: command used for cached queries
    """
    self._qstat_instant_executable = qstat_instant_executable
    self._qstat_cached_executable = qstat_cached_executable
    self._out_of_scope_jobs = []
    self._task_dictionary = {}
    # Snapshot pre-existing jobs so they can be ignored for this run.
    self._remove_old_jobs()
def _remove_old_jobs(self):
    """Record jobs that pre-date this run so they can be ignored.

    Only called during initialization: jobs that existed prior to starting
    new jobs are irrelevant to the current run.
    """
    self._run_qstat(u'QstatInitialization', True)
def add_startup_job(self, taskid, qsub_command_line):
    """Register a just-submitted job in the tracking dictionary.

    :param taskid: the job id (coerced to int)
    :param qsub_command_line: re-used as the job's command-line context
    :return: None
    """
    job_id = int(taskid)
    self._task_dictionary[job_id] = QJobInfo(
        job_id, u'initializing', time.time(), u'noQueue', 1, qsub_command_line)
'request definitive job completion information for the current job from the qacct report'
@staticmethod def _qacct_verified_complete(taskid):
sge_debug_print(u'WARNING: CONTACTING qacct for finished jobs, {0}: {1}'.format(time.time(), u'Verifying Completion')) this_command = u'qacct' qacct_retries = 10 is_complete = False while (qacct_retries > 0): qacct_retries -= 1 try: proc = subp...
'request all job information for the current user in xmlformat. See documentation from java documentation: http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html -s r gives running jobs -s z gives recently completed jobs (**recently** is very ambiguous) -s s suspended jobs'
def _run_qstat(self, reason_for_qstat, force_instant=True):
sge_debug_print(u'WARNING: CONTACTING qmaster for jobs, {0}: {1}'.format(time.time(), reason_for_qstat)) if force_instant: this_command = self._qstat_instant_executable else: this_command = self._qstat_cached_executable qstat_retries = 10 while (qstat_retries > ...
def print_dictionary(self):
    """For debugging: dump every tracked job through sge_debug_print."""
    for job_info in list(self._task_dictionary.values()):
        sge_debug_print(str(job_info))
'Sends jobs to workers when system resources are available. Check memory (gb) and cores usage before running jobs.'
def _send_procs_to_workers(self, updatehash=False, graph=None):
executing_now = [] currently_running_jobids = np.flatnonzero(((self.proc_pending == True) & (self.depidx.sum(axis=0) == 0).__array__())) busy_memory_gb = 0 busy_processors = 0 for jobid in currently_running_jobids: if ((self.procs[jobid]._interface.estimated_memory_gb <= self.memory_gb) and ...
def run(self, graph, config, updatehash=False):
    """Execute a pre-defined pipeline serially via the debug callable.

    Parameters
    ----------
    graph : networkx digraph
        Defines order of execution.
    """
    if not isinstance(graph, nx.DiGraph):
        raise ValueError(u'Input must be a networkx digraph object')
    logger.info(u'Executing debug plugin')
    for node in nx.topological_sort(graph):
        # Hand each node (with the full graph for context) to the callback.
        self._callable(node, graph)
'LSF lists a status of \'PEND\' when a job has been submitted but is waiting to be picked up, and \'RUN\' when it is actively being processed. But _is_pending should return True until a job has finished and is ready to be checked for completeness. So return True if status is either \'PEND\' or \'RUN\''
def _is_pending(self, taskid):
cmd = CommandLine(u'bjobs', terminal_output=u'allatonce') cmd.inputs.args = (u'%d' % taskid) oldlevel = iflogger.level iflogger.setLevel(logging.getLevelName(u'CRITICAL')) result = cmd.run(ignore_exception=True) iflogger.setLevel(oldlevel) if ((u'DONE' in result.runtime.stdout) or (u'EXIT' i...
def __init__(self, name=None, base_dir=None):
    """Initialize base parameters of a workflow or node.

    Parameters
    ----------
    name : string (mandatory)
        Name of this node. Must be alphanumeric and not contain any
        special characters (e.g. '.', '@'); enforced by ``_verify_name``.
    base_dir : string
        Base output directory (will be hashed before creation);
        default None results in the use of mkdtemp.
    """
    self.base_dir = base_dir
    self.config = None
    # Validate before adopting the name; raises on illegal characters.
    self._verify_name(name)
    self.name = name
    # _id mirrors the name until the node is embedded in a hierarchy.
    self._id = self.name
    self._hierarchy = None
def clone(self, name):
    """Return a deep copy of this object under a new, validated name.

    Parameters
    ----------
    name : string (mandatory)
        Must be non-None and differ from the current name.

    Raises
    ------
    Exception
        If *name* is None or equal to the existing name.
    """
    if name is None or name == self.name:
        raise Exception(u'Cloning requires a new name')
    self._verify_name(name)
    duplicate = deepcopy(self)
    duplicate.name = name
    duplicate._id = name
    # The clone starts detached from any workflow hierarchy.
    duplicate._hierarchy = None
    return duplicate
'Parameters interface : interface object node specific interface (fsl.Bet(), spm.Coregister()) name : alphanumeric string node specific name iterables : generator Input field and list to iterate using the pipeline engine for example to iterate over different frac values in fsl.Bet() for a single field the input can be ...
def __init__(self, interface, name, iterables=None, itersource=None, synchronize=False, overwrite=None, needed_outputs=None, run_without_submitting=False, n_procs=1, mem_gb=None, **kwargs):
base_dir = None if (u'base_dir' in kwargs): base_dir = kwargs[u'base_dir'] super(Node, self).__init__(name, base_dir) if (interface is None): raise IOError(u'Interface must be provided') if (not isinstance(interface, Interface)): raise IOError(u'interface must ...
@property
def interface(self):
    """The underlying interface object wrapped by this node."""
    return self._interface
@property
def inputs(self):
    """Inputs of the underlying interface."""
    return self._interface.inputs
@property
def outputs(self):
    """Output fields reported by the underlying interface's ``_outputs()``."""
    return self._interface._outputs()
'Return the location of the output directory for the node'
def output_dir(self):
if (self.base_dir is None): self.base_dir = mkdtemp() outputdir = self.base_dir if self._hierarchy: outputdir = op.join(outputdir, *self._hierarchy.split(u'.')) if self.parameterization: params_str = [u'{}'.format(p) for p in self.parameterization] if (not str2bool(self.c...
def set_input(self, parameter, val):
    """Set interface input *parameter* to a deep copy of *val* (logged at debug)."""
    logger.debug(u'setting nodelevel(%s) input %s = %s', self.name, parameter, to_str(val))
    value_copy = deepcopy(val)
    setattr(self.inputs, parameter, value_copy)
def get_output(self, parameter):
    """Retrieve a particular output of the node.

    Prefers the in-memory result when present; otherwise loads the result
    file from the node's output directory. Returns None when no outputs
    are available.
    """
    if self._result:
        return getattr(self._result.outputs, parameter)
    cwd = self.output_dir()
    result, _, _ = self._load_resultfile(cwd)
    if result and result.outputs:
        return getattr(result.outputs, parameter)
    return None
def help(self):
    """Print the wrapped interface's help text."""
    iface = self._interface
    iface.help()
'Execute the node in its directory. Parameters: updatehash : boolean -- update the hash stored in the output directory.'
def run(self, updatehash=False):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: merges node-level config over the
# global config sections (node wins via merge_dict), resolves inputs once
# (guarded by _got_inputs), computes the output directory, and logs execution;
# the actual run/caching logic is cut off here.
if (self.config is None): self.config = deepcopy(config._sections) else: self.config = merge_dict(deepcopy(config._sections), self.config) if (not self._got_inputs): self._get_inputs() self._got_inputs = True outdir = self.output_dir() logger.info(u'Executing node ...
'Returns the directory name for the given parameterization string as follows: - If the parameterization is longer than 32 characters, then return the SHA-1 hex digest. - Otherwise, return the parameterization unchanged.'
def _parameterization_dir(self, param):
if (len(param) > 32): return sha1(param.encode()).hexdigest() else: return param
'Return a hash of the input state'
def _get_hashval(self):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: ensures inputs are resolved,
# then delegates hashing to inputs.get_hashval using the configured
# execution hash_method; also consults remove_unnecessary_outputs together
# with needed_outputs, but that branch is cut off here.
if (not self._got_inputs): self._get_inputs() self._got_inputs = True (hashed_inputs, hashvalue) = self.inputs.get_hashval(hash_method=self.config[u'execution'][u'hash_method']) rm_extra = self.config[u'execution'][u'remove_unnecessary_outputs'] if (str2bool(rm_extra) and self.needed_out...
'Retrieve inputs from pointers to results files. This mechanism can be extended/replaced to retrieve data from other sources (e.g., XNAT, HTTP, etc.).'
def _get_inputs(self):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: for each entry in
# input_source, loads the referenced pickled results file (loadpkl) and
# starts the output value at Undefined; the remainder is cut off here.
logger.debug(u'Setting node inputs') for (key, info) in list(self.input_source.items()): logger.debug(u'input: %s', key) results_file = info[0] logger.debug(u'results file: %s', results_file) results = loadpkl(results_file) output_value = Undefined ...
'Load results if a result file exists in cwd. Parameter cwd: working directory of the node. Returns (result, aggregate, attribute_error): the InterfaceResult structure, whether the node should aggregate_outputs, and whether a traits-version mismatch occurred while loading.'
def _load_resultfile(self, cwd):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: looks for
# 'result_<name>.pklz' under cwd, gzip-opens it and unpickles the result,
# with a UnicodeDecodeError handler that is cut off here.
aggregate = True resultsoutputfile = op.join(cwd, (u'result_%s.pklz' % self.name)) result = None attribute_error = False if op.exists(resultsoutputfile): pkl_file = gzip.open(resultsoutputfile, u'rb') try: result = pickle.load(pkl_file) except UnicodeDecodeError: ...
'Copy input files into the working directory and update the inputs to point at the copies.'
def _copyfiles_to_wd(self, outdir, execute, linksonly=False):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: only acts when the interface
# exposes _get_filecopy_info; with execute and linksonly it stages files
# into a '_tempinput' subdirectory of outdir; the rest is cut off here.
if hasattr(self._interface, u'_get_filecopy_info'): logger.debug(u'copying files to wd [execute=%s, linksonly=%s]', str(execute), str(linksonly)) if (execute and linksonly): olddir = outdir outdir = op.join(outdir, u'_tempinput') os.makedirs(outdir)...
'Create a JoinNode. `interface` is a node-specific interface object (e.g. fsl.Bet(), spm.Coregister()); `name` is an alphanumeric node name; `joinsource` names the join predecessor iterable node; `joinfield` is a string or list of strings naming the list input fields to aggregate (default: all).'
def __init__(self, interface, name, joinsource, joinfield=None, unique=False, **kwargs):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: delegates to Node.__init__,
# stores joinsource (via its property setter, which normalizes Node
# instances to names), defaults joinfield to every copyable trait name and
# wraps a lone string into a list; the remainder is cut off here.
super(JoinNode, self).__init__(interface, name, **kwargs) self.joinsource = joinsource u'the join predecessor iterable node' if (not joinfield): joinfield = self._interface.inputs.copyable_trait_names() elif isinstance(joinfield, (str, bytes)): joinfield = [joinfield] ...
@joinsource.setter
def joinsource(self, value):
    """Store the join predecessor; a Node instance is recorded by its name."""
    self._joinsource = value.name if isinstance(value, Node) else value
@property
def inputs(self):
    """Input spec for this join node, including the join-field overrides."""
    spec = self._inputs
    return spec
def _add_join_item_fields(self):
    """Create one slot-indexed item field per join field.

    Intended solely for workflow graph expansion.  Returns a mapping of
    base field name -> newly created slot field name.
    """
    slot = self._next_slot_index
    newfields = {field: self._add_join_item_field(field, slot)
                 for field in self.joinfield}
    logger.debug(u'Added the %s join item fields %s.', self, newfields)
    # Advance the slot counter so the next expansion gets fresh names.
    self._next_slot_index += 1
    return newfields
'Add new join item fields qualified by the given index Return the new field name'
def _add_join_item_field(self, field, index):
name = self._join_item_field_name(field, index) trait = self._inputs.trait(field, False, True) self._inputs.add_trait(name, trait) return name
'Return the field suffixed by the index + 1'
def _join_item_field_name(self, field, index):
return (u'%sJ%d' % (field, (index + 1)))
'Convert the given join fields to accept an input that is a list item rather than a list. Non-join fields delegate to the interface traits. Returns the override DynamicTraitedSpec.'
def _override_join_traits(self, basetraits, fields):
# NOTE(review): the body below is a collapsed, truncated extract ("..." at end);
# code left byte-identical.  Visible behavior: builds a fresh
# DynamicTraitedSpec; when fields is None it defaults to every copyable
# trait name, otherwise each requested field is validated against
# basetraits and a ValueError is raised for unknown names; the rest is cut
# off here.
dyntraits = DynamicTraitedSpec() if (fields is None): fields = basetraits.copyable_trait_names() else: for field in fields: if (not basetraits.trait(field)): raise ValueError((u'The JoinNode %s does not have a field named %s' % (self.nam...