desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt == u'tpm'): return scans_for_fname(filename_to_list(val)) if (opt == u'image_to_align'): return scans_for_fname(filename_to_list(val)) if (opt == u'apply_to_files'): return scans_for_fnames(filename_to_list(val)) if (opt == u'deformation_file'): return np.array([l...
'validate spm normalize options if set to None ignore'
def _parse_inputs(self, skip=()):
einputs = super(Normalize12, self)._parse_inputs(skip=(u'jobtype', u'apply_to_files')) if isdefined(self.inputs.apply_to_files): inputfiles = deepcopy(self.inputs.apply_to_files) if isdefined(self.inputs.image_to_align): inputfiles.extend([self.inputs.image_to_align]) einputs...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
clean_masks_dict = {u'no': 0, u'light': 1, u'thorough': 2} if (opt in [u'data', u'tissue_prob_maps']): if isinstance(val, list): return scans_for_fnames(val) else: return scans_for_fname(val) if (u'output_type' in opt): return [int(v) for v in val] if (opt...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'channel_files', u'channel_info']): new_channel = {} new_channel[u'vols'] = scans_for_fnames(self.inputs.channel_files) if isdefined(self.inputs.channel_info): info = self.inputs.channel_info new_channel[u'biasreg'] = info[0] new_channel[u'bia...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'image_files']): return scans_for_fnames(val, keep4d=True, separate_sessions=True) elif (opt == u'regularization_form'): mapper = {u'Linear': 0, u'Membrane': 1, u'Bending': 2} return mapper[val] elif (opt == u'iteration_parameters'): params = [] for param...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'template_file']): return np.array([val], dtype=object) elif (opt in [u'flowfield_files']): return scans_for_fnames(val, keep4d=True) elif (opt in [u'apply_to_files']): return scans_for_fnames(val, keep4d=True, separate_sessions=True) elif (opt == u'voxel_size'): ...
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt == u'image_files':
        return scans_for_fnames(val, keep4d=True, separate_sessions=True)
    if opt == u'flowfield_files':
        return scans_for_fnames(val, keep4d=True)
    # Anything else is handled by the base class.
    return super(CreateWarped, self)._format_arg(opt, spec, val)
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'deformation_field', u'reference_volume']): val = [val] if (opt in [u'deformation_field']): return scans_for_fnames(val, keep4d=True, separate_sessions=False) if (opt in [u'in_files', u'reference_volume']): return scans_for_fnames(val, keep4d=False, separate_sessions=Fal...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'in_files']): return scans_for_fnames(val, keep4d=True) elif (opt in [u'spatial_normalization']): if (val == u'low'): return {u'normlow': []} elif (opt in [u'dartel_template']): return np.array([val], dtype=object) elif (opt in [u'deformation_field']): ...
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt in (u'spm_mat_dir', u'mask_image'):
        return np.array([str(val)], dtype=object)
    if opt == u'session_info':
        # SPM expects a list of session dictionaries.
        return [val] if isinstance(val, dict) else val
    return super(Level1Design, self)._format_arg(opt, spec, val)
'validate spm realign options if set to None ignore'
def _parse_inputs(self):
einputs = super(Level1Design, self)._parse_inputs(skip=u'mask_threshold') for sessinfo in einputs[0][u'sess']: sessinfo[u'scans'] = scans_for_fnames(filename_to_list(sessinfo[u'scans']), keep4d=False) if (not isdefined(self.inputs.spm_mat_dir)): einputs[0][u'dir'] = np.array([str(os.getcwd()...
'validates spm options and generates job structure if mfile is True uses matlab .m file else generates a job structure and saves in .mat'
def _make_matlab_command(self, content):
if isdefined(self.inputs.mask_image): postscript = u'load SPM;\n' postscript += (u"SPM.xM.VM = spm_vol('%s');\n" % list_to_filename(self.inputs.mask_image)) postscript += u'SPM.xM.I = 0;\n' postscript += u'SPM.xM.T = [];\n' postscript += (u'SPM.xM.TH =...
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt == u'spm_mat_file':
        return np.array([str(val)], dtype=object)
    if opt == u'estimation_method':
        if not isinstance(val, (str, bytes)):
            return val
        # A bare method name becomes a single-entry job dict.
        return {u'{}'.format(val): 1}
    return super(EstimateModel, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
    """validate spm realign options if set to None ignore"""
    parsed = super(EstimateModel, self)._parse_inputs(skip=u'flags')
    if isdefined(self.inputs.flags):
        # Fold any user-supplied extra flags into the job description.
        parsed[0].update(dict(self.inputs.flags.items()))
    return parsed
'validates spm options and generates job structure'
def _make_matlab_command(self, _):
contrasts = [] cname = [] for (i, cont) in enumerate(self.inputs.contrasts): cname.insert(i, cont[0]) contrasts.insert(i, Bunch(name=cont[0], stat=cont[1], conditions=cont[2], weights=None, sessions=None)) if (len(cont) >= 4): contrasts[i].weights = cont[3] if (le...
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'spm_mat_dir', u'explicit_mask_file']): return np.array([str(val)], dtype=object) if (opt in [u'covariates']): outlist = [] mapping = {u'name': u'cname', u'vector': u'c', u'interaction': u'iCFI', u'centering': u'iCC'} for dictitem in val: outdict = {} ...
def _parse_inputs(self):
    """validate spm realign options if set to None ignore"""
    parsed = super(FactorialDesign, self)._parse_inputs()
    if not isdefined(self.inputs.spm_mat_dir):
        # Default the design directory to the current working directory.
        parsed[0][u'dir'] = np.array([str(os.getcwd())], dtype=object)
    return parsed
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt == u'in_files':
        return np.array(val, dtype=object)
    return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt in (u'group1_files', u'group2_files'):
        return np.array(val, dtype=object)
    return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)
def _format_arg(self, opt, spec, val):
    """Convert input to appropriate format for spm."""
    if opt == u'paired_files':
        # One dict per subject, each holding that subject's scan pair.
        return [dict(scans=np.array(pair, dtype=object)) for pair in val]
    return super(PairedTTestDesign, self)._format_arg(opt, spec, val)
'Convert input to appropriate format for spm'
def _format_arg(self, opt, spec, val):
if (opt in [u'in_files']): return np.array(val, dtype=object) if (opt in [u'user_covariates']): outlist = [] mapping = {u'name': u'cname', u'vector': u'c', u'centering': u'iCC'} for dictitem in val: outdict = {} for (key, keyval) in list(dictitem.items()):...
'Check for freesurfer version on system Find which freesurfer is being used....and get version from /path/to/freesurfer/build-stamp.txt Returns version : string version number as string or None if freesurfer version not found'
@staticmethod def version():
fs_home = os.getenv(u'FREESURFER_HOME') if (fs_home is None): return None versionfile = os.path.join(fs_home, u'build-stamp.txt') if (not os.path.exists(versionfile)): return None fid = open(versionfile, u'rt') version = fid.readline() fid.close() return version
'Return a comparable version object If no version found, use LooseVersion(\'0.0.0\')'
@classmethod def looseversion(cls):
ver = cls.version() if (ver is None): return LooseVersion(u'0.0.0') vinfo = ver.rstrip().split(u'-') try: int(vinfo[(-1)], 16) except ValueError: githash = u'' else: githash = (u'.' + vinfo[(-1)]) if githash: if (vinfo[3] == u'dev'): vstr =...
@classmethod
def subjectsdir(cls):
    """Check the global SUBJECTS_DIR.

    Returns
    -------
    subject_dir : string
        Current environment setting of SUBJECTS_DIR, or None when no
        freesurfer installation is detected.
    """
    if not cls.version():
        return None
    return os.environ[u'SUBJECTS_DIR']
'Define a generic mapping for a single outfile The filename is potentially autogenerated by suffixing inputs.infile Parameters basename : string (required) filename to base the new filename on fname : string if not None, just use this fname cwd : string prefix paths with cwd, otherwise os.getcwd() suffix : string defau...
def _gen_fname(self, basename, fname=None, cwd=None, suffix=u'_fs', use_ext=True):
if (basename == u''): msg = (u'Unable to generate filename for command %s. ' % self.cmd) msg += u'basename is not set!' raise ValueError(msg) if (cwd is None): cwd = os.getcwd() fname = fname_presuffix(basename, suffix=suffix, use_ext=use_ext, ne...
'Filename normalization routine to perform only when run in Node context'
def _normalize_filenames(self):
pass
'Based on MRIsBuildFileName in freesurfer/utils/mrisurf.c If no path information is provided for out_name, use path and hemisphere (if also unspecified) from in_file to determine the path of the associated file. Use in_file prefix to indicate hemisphere for out_name, rather than inspecting the surface data structure.'
@staticmethod def _associated_file(in_file, out_name):
(path, base) = os.path.split(out_name) if (path == u''): (path, in_file) = os.path.split(in_file) hemis = (u'lh.', u'rh.') if ((in_file[:3] in hemis) and (base[:3] not in hemis)): base = (in_file[:3] + base) return os.path.join(path, base)
def _normalize_filenames(self):
    """In a Node context, interpret out_file as a literal path to reduce surprise."""
    out_file = self.inputs.out_file
    if isdefined(out_file):
        self.inputs.out_file = os.path.abspath(out_file)
'Find full paths for pial, thickness and sphere files for copying'
def _normalize_filenames(self):
in_file = self.inputs.in_file pial = self.inputs.pial if (not isdefined(pial)): pial = u'pial' self.inputs.pial = self._associated_file(in_file, pial) if (isdefined(self.inputs.thickness) and self.inputs.thickness): thickness_name = self.inputs.thickness_name if (not isdefine...
'validate fsl bet options if set to None ignore'
def _get_dicomfiles(self):
return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir, u'*-1.dcm')))
'returns output directory'
def _get_outdir(self):
subjid = self.inputs.subject_id if (not isdefined(subjid)): (path, fname) = os.path.split(self._get_dicomfiles()[0]) subjid = int(fname.split(u'-')[0]) if isdefined(self.inputs.subject_dir_template): subjid = (self.inputs.subject_dir_template % subjid) basedir = self.inputs.base_...
'Returns list of dicom series that should be converted. Requires a dicom info summary file generated by ``DicomDirInfo``'
def _get_runs(self):
seq = np.genfromtxt(self.inputs.dicom_info, dtype=object) runs = [] for s in seq: if self.inputs.seq_list: if self.inputs.ignore_single_slice: if ((int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list])): runs.append(int(s[2])) ...
'Returns list of files to be converted'
def _get_filelist(self, outdir):
filemap = {} for f in self._get_dicomfiles(): (head, fname) = os.path.split(f) (fname, ext) = os.path.splitext(fname) fileparts = fname.split(u'-') runno = int(fileparts[1]) out_type = MRIConvert.filemap[self.inputs.out_type] outfile = os.path.join(outdir, u'.'.jo...
'`command` plus any arguments (args) validates arguments and generates command line'
@property def cmdline(self):
self._check_mandatory_inputs() outdir = self._get_outdir() cmd = [] if (not os.path.exists(outdir)): cmdstr = (u'python -c "import os; os.makedirs(\'%s\')"' % outdir) cmd.extend([cmdstr]) infofile = os.path.join(outdir, u'shortinfo.txt') if (not os.path.exists(infofil...
'See io.FreeSurferSource.outputs for the list of outputs returned'
def _list_outputs(self):
if isdefined(self.inputs.subjects_dir): subjects_dir = self.inputs.subjects_dir else: subjects_dir = self._gen_subjects_dir() if isdefined(self.inputs.hemi): hemi = self.inputs.hemi else: hemi = u'both' outputs = self._outputs().get() outputs.update(FreeSurferSour...
@staticmethod
def version():
    """Check for dtk version on system.

    Returns
    -------
    version : str
        Version number as string, or None if dti_recon is not found.
    """
    clout = CommandLine(command=u'dti_recon', terminal_output=u'allatonce').run()
    # Bug fix: '!=' instead of 'is not' — identity comparison with an int
    # literal relies on CPython small-int caching and is a SyntaxWarning
    # on Python 3.8+.
    if clout.runtime.returncode != 0:
        return None
    dtirecon = clout.runtime.stdout
    result = re.search(u'dti_recon (.*)\n', dtirecon)
    version = result.group(0).split()[1]
    return version
'Read from csv in_file and return an array and ROI names The input file should have a first row containing the names of the ROIs (strings) the rest of the data will be read in and transposed so that the rows (TRs) will becomes the second (and last) dimension of the array'
def _read_csv(self):
first_row = open(self.inputs.in_file).readline() if (not first_row[1].isalpha()): raise ValueError(u'First row of in_file should contain ROI names as strings of characters') roi_names = open(self.inputs.in_file).readline().replace(u'"', u'').strip(u'\n').split(u',') ...
def _csv2ts(self):
    """Read data from the in_file and generate a nitime TimeSeries object."""
    data, roi_names = self._read_csv()
    ts = TimeSeries(data=data, sampling_interval=self.inputs.TR, time_unit=u's')
    # Attach the ROI labels so downstream consumers can map rows to regions.
    ts.metadata = dict(ROIs=roi_names)
    return ts
'Generate the output csv files.'
def _make_output_files(self):
for this in zip([self.coherence, self.delay], [u'coherence', u'delay']): tmp_f = tempfile.mkstemp()[1] np.savetxt(tmp_f, this[0], delimiter=u',') fid = open(fname_presuffix(self.inputs.output_csv_file, suffix=(u'_%s' % this[1])), u'w+') fid.write(((u',' + u','.join(self.ROIs)) + u'\n...
'Generate the desired figure and save the files according to self.inputs.output_figure_file'
def _make_output_figures(self):
if (self.inputs.figure_type == u'matrix'): fig_coh = viz.drawmatrix_channels(self.coherence, channel_names=self.ROIs, color_anchor=0) fig_coh.savefig(fname_presuffix(self.inputs.output_figure_file, suffix=u'_coherence')) fig_dt = viz.drawmatrix_channels(self.delay, channel_names=self.ROIs, c...
def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, **metadata):
    """Create a File trait.

    Parameters
    ----------
    value : string
        The default value for the trait.
    filter : string
        A wildcard string to filter filenames in the file dialog box used
        by the attribute trait editor.
    auto_set : boolean
        Whether the file editor updates the trait value after every key
        stroke.
    exists : boolean
        Whether the trait value must name an existing file.
    """
    self.exists = exists
    self.entries = entries
    self.auto_set = auto_set
    self.filter = filter
    if exists:
        # Advertise the existence requirement in validation error messages.
        self.info_text = u'an existing file name'
    super(BaseFile, self).__init__(value, **metadata)
'Validates that a specified value is valid for this trait. Note: The \'fast validator\' version performs this check in C.'
def validate(self, object, name, value):
validated_value = super(BaseFile, self).validate(object, name, value) if (not self.exists): return validated_value elif os.path.isfile(value): return validated_value else: raise TraitError(args=u"The trait '{}' of {} instance is {}, but the path ...
def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, **metadata):
    """Create a File trait.

    All parameters are forwarded unchanged to BaseFile; see BaseFile for
    their semantics.
    """
    super(File, self).__init__(value, filter, auto_set, entries, exists, **metadata)
def __init__(self, value=u'', auto_set=False, entries=0, exists=False, **metadata):
    """Create a BaseDirectory trait.

    Parameters
    ----------
    value : string
        The default value for the trait.
    auto_set : boolean
        Whether the directory editor updates the trait value after every
        key stroke.
    exists : boolean
        Whether the trait value must name an existing directory.
    """
    self.exists = exists
    self.auto_set = auto_set
    self.entries = entries
    if exists:
        # Advertise the existence requirement in validation error messages.
        self.info_text = u'an existing directory name'
    super(BaseDirectory, self).__init__(value, **metadata)
'Validates that a specified value is valid for this trait. Note: The \'fast validator\' version performs this check in C.'
def validate(self, object, name, value):
if isinstance(value, (str, bytes)): if (not self.exists): return value if os.path.isdir(value): return value else: raise TraitError(args=u"The trait '{}' of {} instance is {}, but the path '{}' does not exist."...
def __init__(self, value=u'', auto_set=False, entries=0, exists=False, **metadata):
    """Create a Directory trait.

    All parameters are forwarded unchanged to BaseDirectory; see
    BaseDirectory for their semantics.
    """
    super(Directory, self).__init__(value, auto_set, entries, exists, **metadata)
def __init__(self, value=u'', filter=None, auto_set=False, entries=0, exists=False, types=None, allow_compressed=True, **metadata):
    """Trait that handles neuroimaging files.

    Parameters
    ----------
    types : list
        Strings of accepted file format types; an empty list accepts all.
    allow_compressed : boolean
        Whether the compressed variant of the format is acceptable.
    """
    # Bug fix: the original default 'types=[]' is a single mutable list
    # shared across every instance; use a None sentinel and build a
    # fresh list per instance instead (behavior otherwise unchanged).
    self.types = types if types is not None else []
    self.allow_compressed = allow_compressed
    super(ImageFile, self).__init__(value, filter, auto_set, entries, exists, **metadata)
'Validates that a specified value is valid for this trait.'
def validate(self, object, name, value):
validated_value = super(ImageFile, self).validate(object, name, value) if (validated_value and self.types): self._exts = self.grab_exts() if (not any((validated_value.endswith(x) for x in self._exts))): raise TraitError(args=u'{} is not included in allowed types: ...
'Generate all possible permutations of < multi - tensor > < single - tensor > options'
def _gen_model_options():
single_tensor = [u'dt', u'restore', u'algdt', u'nldt_pos', u'nldt', u'ldt_wtd'] multi_tensor = [u'cylcyl', u'cylcyl_eq', u'pospos', u'pospos_eq', u'poscyl', u'poscyl_eq', u'cylcylcyl', u'cylcylcyl_eq', u'pospospos', u'pospospos_eq', u'posposcyl', u'posposcyl_eq', u'poscylcyl', u'poscylcyl_eq'] other = [u'ad...
'extract the proper filename from the first line of the artifacts file'
def _get_cleaned_functional_filename(self, artifacts_list_filename):
artifacts_list_file = open(artifacts_list_filename, u'r') (functional_filename, extension) = artifacts_list_file.readline().split(u'.') (artifacts_list_file_path, artifacts_list_filename) = os.path.split(artifacts_list_filename) return os.path.join(artifacts_list_file_path, (functional_filename + u'_cle...
'Check for fsl version on system Parameters None Returns version : str Version number as string or None if FSL not found'
@staticmethod def version():
try: basedir = os.environ[u'FSLDIR'] except KeyError: return None out = open((u'%s/etc/fslversion' % basedir)).read() return out.strip(u'\n')
@classmethod
def output_type_to_ext(cls, output_type):
    """Get the file extension for the given output type.

    Parameters
    ----------
    output_type : {'NIFTI', 'NIFTI_GZ', 'NIFTI_PAIR', 'NIFTI_PAIR_GZ'}
        String specifying the output type.

    Returns
    -------
    extension : str
        The file extension for the output type.

    Raises
    ------
    KeyError
        If output_type is not a recognized FSLOUTPUTTYPE.
    """
    try:
        return cls.ftypes[output_type]
    except KeyError:
        # Bug fix: the original built msg as a tuple
        # ('Invalid FSLOUTPUTTYPE: ', output_type), yielding an awkward
        # two-element KeyError payload; format a single string instead.
        msg = u'Invalid FSLOUTPUTTYPE: %s' % output_type
        raise KeyError(msg)
@classmethod
def output_type(cls):
    """Get the global FSL output file type FSLOUTPUTTYPE.

    Returns
    -------
    fsl_ftype : string
        Current environment setting of FSLOUTPUTTYPE, or 'NIFTI' (with a
        warning) when the variable is unset.
    """
    if u'FSLOUTPUTTYPE' in os.environ:
        return os.environ[u'FSLOUTPUTTYPE']
    LOGGER.warn(u'FSLOUTPUTTYPE environment variable is not set. Setting FSLOUTPUTTYPE=NIFTI')
    return u'NIFTI'
'Grab an image from the standard location. Returns a list of standard images if called without arguments. Could be made more fancy to allow for more relocatability'
@staticmethod def standard_image(img_name=None):
try: fsldir = os.environ[u'FSLDIR'] except KeyError: raise Exception(u'FSL environment variables not set') stdpath = os.path.join(fsldir, u'data', u'standard') if (img_name is None): return [filename.replace((stdpath + u'/'), u'') for filename in glob(os.path.join(std...
@classmethod
def set_default_output_type(cls, output_type):
    """Set the default output type for FSL classes.

    Existing instances are not affected; assign
    <instance>.inputs.output_type for those.
    """
    # Guard clause: reject unknown types up front.
    if output_type not in Info.ftypes:
        raise AttributeError(u'Invalid FSL output_type: %s' % output_type)
    cls._output_type = output_type
'Generate a filename based on the given parameters. The filename will take the form: cwd/basename<suffix><ext>. If change_ext is True, it will use the extentions specified in <instance>intputs.output_type. Parameters basename : str Filename to base the new filename on. cwd : str Path to prefix to the new filename. (def...
def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):
if (basename == u''): msg = (u'Unable to generate filename for command %s. ' % self.cmd) msg += u'basename is not set!' raise ValueError(msg) if (cwd is None): cwd = os.getcwd() if (ext is None): ext = Info.output_type_to_ext(self.inputs....
def _list_outputs(self):
    """Return the Bunch of files generated by the interface.

    roi_file defaults to an auto-generated name derived from in_file
    ('_roi' suffix) and is always made absolute.
    """
    outputs = self._outputs().get()
    roi_file = self.inputs.roi_file
    if not isdefined(roi_file):
        roi_file = self._gen_fname(self.inputs.in_file, suffix=u'_roi')
    outputs[u'roi_file'] = os.path.abspath(roi_file)
    return outputs
def _list_outputs(self):
    """Return the Bunch of files generated by the interface.

    out_files collects every volume matching the output basename (or the
    'vol' default) in the current directory, sorted by name.
    """
    outputs = self._outputs().get()
    ext = Info.output_type_to_ext(self.inputs.output_type)
    if isdefined(self.inputs.out_base_name):
        pattern = u'%s*' % self.inputs.out_base_name
    else:
        pattern = u'vol*'
    outputs[u'out_files'] = sorted(glob(os.path.join(os.getcwd(), pattern + ext)))
    return outputs
'Generate a topup compatible encoding file based on given directions'
def _generate_encfile(self):
out_file = self._get_encfilename() durations = self.inputs.readout_times if (len(self.inputs.encoding_direction) != len(durations)): if (len(self.inputs.readout_times) != 1): raise ValueError(u'Readout time must be a float or match thelength of encoding d...
def write_config(self, configfile):
    """Write the currently set inputs to the specified config file.

    Parameters
    ----------
    configfile : str
        /path/to/configfile

    Each (name, value) input item is written as its repr, one per line.
    If the file cannot be created, a message is printed and nothing is
    written.
    """
    try:
        fid = open(configfile, u'w+')
    except IOError:
        print(u'unable to create config_file %s' % configfile)
        # Bug fix: the original fell through and hit a NameError on the
        # unbound 'fid'; bail out instead.
        return
    # 'with' guarantees the handle is closed even if a write fails.
    with fid:
        for item in list(self.inputs.get().items()):
            # Bug fix: '%s' % item with a tuple item raised TypeError
            # ("not all arguments converted"); wrap it in a 1-tuple.
            fid.write(u'%s\n' % (item,))
@classmethod
def intensitymap_file_basename(cls, f):
    """Remove valid intensitymap extensions from `f`.

    Returns a basename that can refer to both intensitymap files.
    """
    candidate_exts = list(Info.ftypes.values()) + [u'.txt']
    for ext in candidate_exts:
        if f.endswith(ext):
            return f[:-len(ext)]
    # No known extension: return the name unchanged.
    return f
'Creates EV files from condition and regressor information. Parameters: runinfo : dict Generated by `SpecifyModel` and contains information about events and other regressors. runidx : int Index to run number ev_parameters : dict A dictionary containing the model parameters for the given design type. orthogonalization ...
def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, orthogonalization, contrasts, do_tempfilter, basis_key):
conds = {} evname = [] if (basis_key == u'dgamma'): basis_key = u'hrf' elif (basis_key == u'gamma'): try: _ = ev_parameters[u'gammasigma'] except KeyError: ev_parameters[u'gammasigma'] = 3 try: _ = ev_parameters[u'gammadelay'] e...
'Returns functional files in the order of runs'
def _get_func_files(self, session_info):
func_files = [] for (i, info) in enumerate(session_info): func_files.insert(i, info[u'scans']) return func_files
'Check for afni version on system Parameters None Returns version : str Version number as string or None if AFNI not found'
@staticmethod def version():
try: clout = CommandLine(command=u'afni_vcheck', terminal_output=u'allatonce').run() currv = clout.runtime.stdout.split(u'\n')[1].split(u'=', 1)[1].strip() except IOError: IFLOGGER.warn(u'afni_vcheck executable not found.') return None except RuntimeError as e: ...
@classmethod
def output_type_to_ext(cls, outputtype):
    """Get the file extension for the given output type.

    Parameters
    ----------
    outputtype : {'NIFTI', 'NIFTI_GZ', 'AFNI'}
        String specifying the output type.

    Returns
    -------
    extension : str
        The file extension for the output type.

    Raises
    ------
    KeyError
        If outputtype is not a recognized AFNIOUTPUTTYPE.
    """
    try:
        return cls.ftypes[outputtype]
    except KeyError as e:
        # Bug fix: the original built msg as a tuple
        # ('Invalid AFNIOUTPUTTYPE: ', outputtype); format one string.
        msg = u'Invalid AFNIOUTPUTTYPE: %s' % outputtype
        raise_from(KeyError(msg), e)
@classmethod
def outputtype(cls):
    """Return the default output filetype for AFNI classes.

    AFNI has no environment variables; output filetypes get set in
    command line calls. Nipype uses 'AFNI' as the default.

    Returns
    -------
    str
        Always u'AFNI'.
    """
    return u'AFNI'
@staticmethod
def standard_image(img_name):
    """Grab an image from the standard AFNI location.

    Returns the path next to the afni executable found on PATH, or None
    when 'which afni' fails. Could be made more fancy to allow for more
    relocatability.
    """
    clout = CommandLine(u'which afni', terminal_output=u'allatonce').run()
    # Bug fix: '!=' instead of 'is not' — identity comparison with an int
    # literal is unreliable and a SyntaxWarning on Python 3.8+.
    if clout.runtime.returncode != 0:
        return None
    out = clout.runtime.stdout
    basedir = os.path.split(out)[0]
    return os.path.join(basedir, img_name)
'i think? updates class private attribute based on instance input in fsl also updates ENVIRON variable....not valid in afni as it uses no environment variables'
def _output_update(self):
self._outputtype = self.inputs.outputtype
@classmethod
def set_default_output_type(cls, outputtype):
    """Set the default output type for AFNI classes.

    Existing instances are not affected; assign
    <instance>.inputs.outputtype for those.
    """
    # Guard clause: reject unknown types up front.
    if outputtype not in Info.ftypes:
        raise AttributeError(u'Invalid AFNI outputtype: %s' % outputtype)
    cls._outputtype = outputtype
'Generate a filename based on the given parameters. The filename will take the form: cwd/basename<suffix><ext>. If change_ext is True, it will use the extentions specified in <instance>intputs.output_type. Parameters basename : str Filename to base the new filename on. cwd : str Path to prefix to the new filename. (def...
def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):
if (basename == u''): msg = (u'Unable to generate filename for command %s. ' % self.cmd) msg += u'basename is not set!' raise ValueError(msg) if (cwd is None): cwd = os.getcwd() if (ext is None): ext = Info.output_type_to_ext(self.inputs....
def _parse_inputs(self, skip=None):
    """Skip the arguments without argstr metadata.

    The 'skip' parameter is accepted for signature compatibility; the
    skipped names are fixed.
    """
    fixed_skip = (u'start_idx', u'stop_idx', u'other')
    return super(Calc, self)._parse_inputs(skip=fixed_skip)
def _parse_inputs(self, skip=None):
    """Skip the arguments without argstr metadata.

    The 'skip' parameter is accepted for signature compatibility; the
    skipped names are fixed.
    """
    fixed_skip = (u'start_idx', u'stop_idx', u'other')
    return super(Eval, self)._parse_inputs(skip=fixed_skip)
'Return the output path for the gernerated Nifti.'
def _get_out_path(self, meta, idx=None):
if self.inputs.out_format: out_fmt = self.inputs.out_format else: out_fmt = [] if (idx is not None): out_fmt.append((u'%03d' % idx)) if (u'SeriesNumber' in meta): out_fmt.append(u'%(SeriesNumber)03d') if (u'ProtocolName' in meta): out_f...
'Enables debug configuration'
def enable_debug_mode(self):
self._config.set(u'execution', u'stop_on_first_crash', u'true') self._config.set(u'execution', u'remove_unnecessary_outputs', u'false') self._config.set(u'execution', u'keep_inputs', u'true') self._config.set(u'logging', u'workflow_level', u'DEBUG') self._config.set(u'logging', u'interface_level', u...
def set_log_dir(self, log_dir):
    """Set the logging directory.

    This should be the first thing done before any nipype class with
    logging is imported.
    """
    self._config.set(u'logging', u'log_directory', log_dir)
'Helper to log what actually changed from old to new values of dictionaries. typical use -- log difference for hashed_inputs'
def logdebug_dict_differences(self, dold, dnew, prefix=u''):
if isinstance(dnew, list): dnew = dict(dnew) if isinstance(dold, list): dold = dict(dold) new_keys = set(dnew.keys()) old_keys = set(dold.keys()) if len((new_keys - old_keys)): self._logger.debug((u'%s not previously seen: %s' % (prefix, (new_keys - old_keys)))) ...
def __init__(self, func):
    """Create a OneTimeProperty instance.

    Parameters
    ----------
    func : method
        Called the first time to compute a value; afterwards the method's
        name becomes a standard attribute holding that value.
    """
    self.getter = func
    self.name = func.__name__
def __get__(self, obj, type=None):
    """Called on attribute access on the class or instance.

    Class access returns the raw getter; instance access computes the
    value once and caches it as an instance attribute of the same name,
    which shadows this descriptor on subsequent lookups.
    """
    if obj is None:
        return self.getter
    result = self.getter(obj)
    setattr(obj, self.name, result)
    return result
'build and install nipype in a temporary location.'
def run(self):
install = self.distribution.get_command_obj(u'install') install.install_scripts = self.temp_install_dir install.install_base = self.temp_install_dir install.install_platlib = self.temp_install_dir install.install_purelib = self.temp_install_dir install.install_data = self.temp_install_dir in...
def __init__(self, data):
    """Initialize the reader.

    Parameters
    ----------
    data : str or list
        Either a string with lines separated by newlines, or a pre-split
        list of lines.
    """
    self._str = data if isinstance(data, list) else data.split('\n')
    self.reset()
'func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3'
def _parse_see_also(self, content):
items = [] def parse_item_name(text): "Match ':role:`name`' or 'name'" m = self._name_rgx.match(text) if m: g = m.groups() if (g[1] is None): return (g[3], None) else: return (g[2], g[1]) raise ValueErro...
'.. index: default :refguide: something, else, and more'
def _parse_index(self, section, content):
def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if (len(section) > 1): out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if (len(line) > 2): out[line[1]] = st...
'Grab signature (if given) and summary'
def _parse_summary(self):
if self._is_at_section(): return summary = self._doc.read_to_next_empty_line() summary_str = ' '.join([s.strip() for s in summary]).strip() if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str): self['Signature'] = summary_str if (not self._is_at_section(...
'Generate a member listing, autosummary:: table where possible, and a table where not.'
def _str_member_list(self, name):
out = [] if self[name]: out += [(u'.. rubric:: %s' % name), u''] prefix = getattr(self, u'_name', u'') if prefix: prefix = (u'~%s.' % prefix) autosum = [] others = [] for (param, param_type, desc) in self[name]: param = param.strip() ...
def run(self):
    """Find all the test modules in tests/ and run them via unittest."""
    import unittest
    from pymysqlreplication import tests
    # Pass only the program name so unittest does not consume our argv.
    unittest.main(tests, argv=sys.argv[:1])
'Use for WRITE, UPDATE and DELETE events. Return an array of column data'
def _read_column_data(self, cols_bitmap):
values = {} null_bitmap = self.packet.read(((BitCount(cols_bitmap) + 7) / 8)) nullBitmapIndex = 0 nb_columns = len(self.columns) for i in range(0, nb_columns): column = self.columns[i] name = self.table_map[self.table_id].columns[i].name unsigned = self.table_map[self.table_i...
def __add_fsp_to_time(self, time, column):
    """Read and attach the fractional-seconds part to `time`.

    For details about the new date format see:
    http://dev.mysql.com/doc/internals/en/date-and-time-data-type-representation.html
    """
    microsecond = self.__read_fsp(column)
    # Zero means no fractional part was stored; keep `time` unchanged.
    return time.replace(microsecond=microsecond) if microsecond > 0 else time
'Read MySQL BIT type'
def __read_bit(self, column):
resp = '' for byte in range(0, column.bytes): current_byte = '' data = self.packet.read_uint8() if (byte == 0): if (column.bytes == 1): end = column.bits else: end = (column.bits % 8) if (end == 0): ...
'TIME encoding for nonfractional part: 1 bit sign (1= non-negative, 0= negative) 1 bit unused (reserved for future extensions) 10 bits hour (0-838) 6 bits minute (0-59) 6 bits second (0-59) 24 bits = 3 bytes'
def __read_time2(self, column):
data = self.packet.read_int_be_by_size(3) sign = (1 if self.__read_binary_slice(data, 0, 1, 24) else (-1)) if (sign == (-1)): data = ((~ data) + 1) t = datetime.timedelta(hours=(sign * self.__read_binary_slice(data, 2, 10, 24)), minutes=self.__read_binary_slice(data, 12, 6, 24), seconds=self.__r...
'DATETIME 1 bit sign (1= non-negative, 0= negative) 17 bits year*13+month (year 0-9999, month 0-12) 5 bits day (0-31) 5 bits hour (0-23) 6 bits minute (0-59) 6 bits second (0-59) 40 bits = 5 bytes'
def __read_datetime2(self, column):
data = self.packet.read_int_be_by_size(5) year_month = self.__read_binary_slice(data, 1, 17, 40) try: t = datetime.datetime(year=int((year_month / 13)), month=(year_month % 13), day=self.__read_binary_slice(data, 18, 5, 40), hour=self.__read_binary_slice(data, 23, 5, 40), minute=self.__read_binary_s...
'Read MySQL\'s new decimal format introduced in MySQL 5'
def __read_new_decimal(self, column):
digits_per_integer = 9 compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4] integral = (column.precision - column.decimals) uncomp_integral = int((integral / digits_per_integer)) uncomp_fractional = int((column.decimals / digits_per_integer)) comp_integral = (integral - (uncomp_integral * digits_pe...
def __read_binary_slice(self, binary, start, size, data_length):
    """Extract *size* bits from the packed integer *binary*.

    binary      -- integer holding the packed data
    start       -- first bit to read, counted from the most significant bit
    size        -- number of bits to extract
    data_length -- total bit width of *binary*
    """
    shift = data_length - (start + size)
    # Shift the wanted bits down to the least-significant positions, then
    # mask off everything above them.
    return (binary >> shift) & ((1 << size) - 1)
def getMySQLVersion(self):
    """Return the server's bare version string.

    The suffix after the first dash is dropped, so '5.6.10-log'
    becomes '5.6.10'.
    """
    raw = self.execute('SELECT VERSION()').fetchone()[0]
    version, _, _suffix = raw.partition('-')
    return version
def set_sql_mode(self):
    """Pin sql_mode on MySQL 5.7 servers.

    MySQL 5.7 changed the default sql_mode; forcing NO_ENGINE_SUBSTITUTION
    there makes tests run with the same mode across server versions.
    """
    major_minor = self.getMySQLVersion().rsplit('.', 1)[0]
    if float(major_minor) == 5.7:
        self.execute("set @@sql_mode='NO_ENGINE_SUBSTITUTION'")
@staticmethod
def ignored_events():
    """Return the event classes the BinLogStreamReader should skip."""
    # GTID events carry replication bookkeeping that these tests ignore.
    return [GtidEvent]
'A missing RotateEvent and skip_to_timestamp cause corruption This test shows that a binlog file which lacks the trailing RotateEvent and the use of the ``skip_to_timestamp`` argument together can cause the table_map to become corrupt. The trailing RotateEvent has a timestamp, but may be lost if the server crashes. T...
def test_no_trailing_rotate_event(self):
self.execute('CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY(id))') self.execute('SET AUTOCOMMIT = 0') self.execute('INSERT INTO test(id, data) VALUES (1, "Hello")') self.execute('COMMIT') ...
'Remove the trailing RotateEvent from the first binlog According to the MySQL Internals Manual, a RotateEvent will be added to the end of a binlog when the binlog is rotated. This may not happen if the server crashes, for example. This method removes the trailing RotateEvent to verify that the library properly handles...
def _remove_trailing_rotate_event_from_first_binlog(self):
datadir = self.execute("SHOW VARIABLES LIKE 'datadir'").fetchone()[1] binlog = self.execute('SHOW BINARY LOGS').fetchone()[0] binlogpath = os.path.join(datadir, binlog) reader = SimpleBinLogFileReader(binlogpath, only_events=[RotateEvent]) for _ in reader: reader.truncatebinlo...
def fetchone(self):
    """Return the next event accepted by the filter, or None at end of file."""
    # The first 4 bytes of a binlog file are a magic number; consume it
    # once before reading any events.
    if self._pos is None or self._pos < 4:
        self._read_magic()
    event = self._read_event()
    self._current_event = event
    while event is not None:
        if self._filter_events(event):
            return event
        event = self._read_event()
        self._current_event = event
    return None
def truncatebinlog(self):
    """Shrink the underlying binlog file to end at the current event."""
    event = self._current_event
    if event is None:
        # No event has been read yet, so there is no position to cut at.
        return
    self._file.truncate(event.pos)
'Return True if an event can be returned'
def _filter_events(self, event):
event_type = {constants.QUERY_EVENT: QueryEvent, constants.ROTATE_EVENT: RotateEvent, constants.FORMAT_DESCRIPTION_EVENT: FormatDescriptionEvent, constants.XID_EVENT: XidEvent, constants.TABLE_MAP_EVENT: TableMapEvent, constants.WRITE_ROWS_EVENT_V2: WriteRowsEvent}.get(event.event_type) return (event_type in se...