_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q44500
FetchTransformSaveWithSeparateNewCrashSourceApp._setup_source_and_destination
train
def _setup_source_and_destination(self):
    """Set up source/destination via the base class, then instantiate the
    separate "new_crash_source" from configuration, falling back to the
    regular "source" when no class is configured.
    """
    # Python-2-style explicit super call (codebase predates bare super()).
    super(FetchTransformSaveWithSeparateNewCrashSourceApp, self) \
        ._setup_source_and_destination()
    if self.config.new_crash_source.new_crash_source_class:
        self.new_crash_source = \
            self.config.new_crash_source.new_crash_source_class(
                self.config.new_crash_source,
                name=self.app_instance_name,
                quit_check_callback=self.quit_check
            )
    else:
        # the configuration failed to provide a "new_crash_source", fall
        # back to tying the "new_crash_source" to the "source".
        self.new_crash_source = self.source
python
{ "resource": "" }
q44501
load_config
train
def load_config(options):
    ''' Load options, platform, colors, and icons.

    Mutates module-level state: binds the ``opts``/``pform`` globals and
    injects ANSI color templates plus platform icon variables into this
    module's namespace so other functions can use them unqualified.
    '''
    global opts, pform
    opts = options
    pform = options.pform
    global_ns = globals()

    # get colors - choose 8-bit or 4-bit ANSI templates by terminal ability
    if pform.hicolor:
        global_ns['dim_templ'] = ansi.dim8t
        global_ns['swap_clr_templ'] = ansi.csi8_blk % ansi.blu8
    else:
        global_ns['dim_templ'] = ansi.dim4t
        global_ns['swap_clr_templ'] = ansi.fbblue

    # load icons into module namespace
    # NOTE(review): copies platform attributes whose names look like
    # "_*ico" (leading underscore AND trailing 'ico') -- confirm naming
    # convention against the platform module.
    for varname in dir(pform):
        if varname.startswith('_') and varname.endswith('ico'):
            global_ns[varname] = getattr(pform, varname)
python
{ "resource": "" }
q44502
fmtstr
train
def fmtstr(text='', colorstr=None, align='>', trunc=True, width=0, end=' '):
    ''' Formats, justifies, and returns a given string according to
        specifications.

    :text:      string to format
    :colorstr:  ANSI %-template applied when opts.incolor is set
    :align:     format-spec alignment char ('<', '>', '^')
    :trunc:     truncate with ellipsis when wider than the column
    :width:     column width; 0 means use opts.colwidth
    :end:       suffix appended to the result
    '''
    colwidth = width or opts.colwidth
    if trunc:
        if len(text) > colwidth:
            # NOTE(review): the boolean `trunc` is forwarded as truncstr's
            # `align` argument -- relies on truncstr's `elif align:` branch
            # treating True as "truncate on the right"; confirm.
            text = truncstr(text, colwidth, align=trunc)  # truncate w/ellipsis
    value = f'{text:{align}{colwidth}}'
    if opts.incolor and colorstr:
        return colorstr % value + end
    else:
        return value + end
python
{ "resource": "" }
q44503
fmtval
train
def fmtval(value, colorstr=None, precision=None, spacing=True, trunc=True,
           end=' '):
    ''' Formats and returns a given number according to specifications.

    :value:      number to render
    :colorstr:   ANSI %-template applied when opts.incolor is set
    :precision:  decimal places; None means use opts.precision
    :spacing:    right-justify to opts.colwidth
    :trunc:      truncate with ellipsis when wider than the column
    :end:        suffix appended to the result
    '''
    colwidth = opts.colwidth
    # get precision
    if precision is None:
        precision = opts.precision
    fmt = '%%.%sf' % precision

    # format with decimal mark, separators.
    # FIX: locale.format() was deprecated in 3.7 and removed in Python
    # 3.12; locale.format_string() is the drop-in replacement for a
    # single '%f' specifier.
    result = locale.format_string(fmt, value, True)
    if spacing:
        result = '%%%ss' % colwidth % result
    if trunc:
        if len(result) > colwidth:  # truncate w/ellipsis
            result = truncstr(result, colwidth)

    # Add color if needed
    if opts.incolor and colorstr:
        return colorstr % result + end
    else:
        return result + end
python
{ "resource": "" }
q44504
get_units
train
def get_units(unit, binary=False):
    ''' Sets the output unit and precision for future calculations and
        returns an integer and the string representation of it.

    :unit:    one of 'b', 'k', 'm', 'g', 't'
    :binary:  use 2**x (Kibibyte...) instead of 10**x (Kilobyte...) units
    '''
    result = None
    if unit == 'b':
        result = 1, 'Byte'
    elif binary:  # 2^X
        if unit == 'k':
            result = 1024, 'Kibibyte'
        elif unit == 'm':
            result = 1048576, 'Mebibyte'
        elif unit == 'g':
            if opts.precision == -1:
                opts.precision = 3
            result = 1073741824, 'Gibibyte'
        elif unit == 't':
            if opts.precision == -1:
                opts.precision = 3
            result = 1099511627776, 'Tebibyte'
    else:  # 10^x
        if unit == 'k':
            result = 1000, 'Kilobyte'
        elif unit == 'm':
            result = 1000000, 'Megabyte'
        elif unit == 'g':
            if opts.precision == -1:
                opts.precision = 3  # new defaults
            result = 1000000000, 'Gigabyte'
        elif unit == 't':
            if opts.precision == -1:
                opts.precision = 3
            result = 1000000000000, 'Terabyte'

    if not result:
        # unknown unit: warn and fall back to the module default.
        # NOTE(review): `_outunit` is defined elsewhere in the module --
        # confirm it is a (factor, name) pair like the values above.
        print(f'Warning: incorrect parameter: {unit}.')
        result = _outunit
    if opts.precision == -1:  # auto
        opts.precision = 0
    return result
python
{ "resource": "" }
q44505
truncstr
train
def truncstr(text, width, align='right'):
    ''' Truncate a string to ``width`` columns, marking the clipped side
    with an ellipsis icon.

    ``align='left'`` keeps the rightmost characters; any other truthy
    value keeps the leftmost characters.
    '''
    head = ''
    tail = ''
    if align == 'left':
        # keep the tail of the string; ellipsis marks the removed prefix
        kept = text[-width + 1:]
        head = _ellpico
    elif align:
        # keep the head of the string; ellipsis marks the removed suffix
        kept = text[:width - 1]
        tail = _ellpico
    return f'{head}{kept}{tail}'
python
{ "resource": "" }
q44506
Archiver.archive
train
def archive(self, target_path=None, zip_path=None):
    """
    Write the target directory to a deflate-compressed ZIP archive.

    :param target_path: The directory path to add.
    :type target_path: str
    :param zip_path: The file path of the ZIP archive.
    :type zip_path: str
    :raises RuntimeError: if paths are unset or the target is not a
        directory.
    """
    if target_path:
        self.target_path = target_path
    if zip_path:
        self.zip_path = zip_path
    if self.has_path is False or os.path.isdir(self.target_path) is False:
        raise RuntimeError("")
    # FIX: use a context manager so the archive is closed (and flushed)
    # even if os.walk()/write() raises -- the original leaked the handle
    # on error -- and stop shadowing the builtin `zip`.
    with zipfile.ZipFile(self.zip_path, 'w',
                         zipfile.ZIP_DEFLATED) as zip_file:
        for root, _, files in os.walk(self.target_path):
            for file in files:
                if file in ARCHIVE_IGNORE_FILES:
                    continue
                current_dir = os.path.relpath(root, self.target_path)
                # store top-level files without a "./" prefix
                if current_dir == ".":
                    file_path = file
                else:
                    file_path = os.path.join(current_dir, file)
                print("Archive {}".format(file))
                zip_file.write(os.path.join(root, file), file_path)
python
{ "resource": "" }
q44507
Archiver.unarchive
train
def unarchive(self, target_path=None, zip_path=None):
    """
    Extract the ZIP archive into the target directory, creating the
    directory first when it does not exist.

    :param target_path: The destination path where to extract the files.
    :type target_path: str
    :param zip_path: The file path of the ZIP archive.
    :type zip_path: str
    :raises RuntimeError: if no paths have been configured.
    """
    if target_path:
        self.target_path = target_path
    if zip_path:
        self.zip_path = zip_path
    if self.has_path is False:
        raise RuntimeError("")
    if not os.path.isdir(self.target_path):
        os.mkdir(self.target_path)
    with zipfile.ZipFile(self.zip_path, 'r') as archive_file:
        archive_file.extractall(self.target_path)
python
{ "resource": "" }
q44508
set_sns
train
def set_sns(style="white", context="paper", font_scale=1.5,
            color_codes=True, rc={}):
    """Apply the project's default plot style via seaborn.

    Font size is set to match the size of the tick labels, rather than
    the axes labels; any rcParam may be overridden through ``rc``.
    """
    defaults = {
        "lines.markersize": 8,
        "lines.markeredgewidth": 1.25,
        "legend.fontsize": "small",
        "font.size": 12/1.5*font_scale,
        "legend.frameon": True,
        "axes.formatter.limits": (-5, 5),
        "axes.grid": True,
    }
    defaults.update(rc)
    # imported lazily so merely importing this module does not pull in
    # seaborn/matplotlib
    import seaborn as sns
    sns.set(style=style, context=context, font_scale=font_scale,
            color_codes=color_codes, rc=defaults)
python
{ "resource": "" }
q44509
label_subplot
train
def label_subplot(ax=None, x=0.5, y=-0.25, text="(a)", **kwargs):
    """Place a subplot label (e.g. ``"(a)"``) in axes coordinates.

    Defaults to the current axes when ``ax`` is None; extra keyword
    arguments are forwarded to ``Axes.text``.
    """
    target = ax if ax is not None else plt.gca()
    target.text(x=x, y=y, s=text, transform=target.transAxes,
                horizontalalignment="center", verticalalignment="top",
                **kwargs)
python
{ "resource": "" }
q44510
GenDebBuilder.genchanges
train
def genchanges(self):
    """Generate a .changes file for this package.

    Builds the Debian upload-control text from the rule parameters plus
    the checksums and size of the built .deb, and returns it as a string.
    """
    chparams = self.params.copy()
    debpath = os.path.join(self.buildroot, self.rule.output_files[0])
    chparams.update({
        'fullversion': '{epoch}:{version}-{release}'.format(**chparams),
        'metahash': self._metahash().hexdigest(),
        'deb_sha1': util.hash_file(debpath, hashlib.sha1()).hexdigest(),
        'deb_sha256': util.hash_file(debpath, hashlib.sha256()
                                     ).hexdigest(),
        'deb_md5': util.hash_file(debpath, hashlib.md5()).hexdigest(),
        'deb_bytes': os.stat(debpath).st_size,
        # TODO: having to do this split('/')[-1] is absurd:
        'deb_filename': debpath.split('/')[-1],
    })
    # Assemble the RFC-822-style .changes body; {field} placeholders are
    # filled from chparams in one final .format() call.
    output = '\n'.join([
        'Format: 1.8',
        # Static date string for repeatable builds:
        'Date: Tue, 01 Jan 2013 00:00:00 -0700',
        'Source: {package_name}',
        'Binary: {package_name}',
        'Architecture: {arch}',
        'Version: {fullversion}',
        'Distribution: {distro}',
        'Urgency: {urgency}',
        'Maintainer: {packager}',
        'Description: ',
        ' {package_name} - {short_description}',
        'Changes: ',
        ' {package_name} ({fullversion}) {distro}; urgency={urgency}',
        ' .',
        ' * Built by Butcher - metahash for this build is {metahash}',
        'Checksums-Sha1: ',
        ' {deb_sha1} {deb_bytes} {deb_filename}',
        'Checksums-Sha256: ',
        ' {deb_sha256} {deb_bytes} {deb_filename}',
        'Files: ',
        ' {deb_md5} {deb_bytes} {section} {priority} {deb_filename}',
        ''  # Newline at end of file.
    ]).format(**chparams)
    return output
python
{ "resource": "" }
q44511
GenDeb.validate_args
train
def validate_args(self):
    """Input validators for this rule type.

    Verifies that ``extra_control_fields`` (when given) is a list of
    2-element tuples and that ``package_name`` is a valid Debian package
    name.
    """
    base.BaseTarget.validate_args(self)
    params = self.params
    if params['extra_control_fields'] is not None:
        assert isinstance(params['extra_control_fields'], list), (
            'extra_control_fields must be a list of tuples, not %s' % type(
                params['extra_control_fields']))
        for elem in params['extra_control_fields']:
            # FIX: the original tested len(elem) == 1, which rejected the
            # 2-element tuples its own error message demands.
            assert (isinstance(elem, tuple) and len(elem) == 2), (
                'extra_control_fields must be a list of 2-element tuples. '
                'Invalid contents: %s' % elem)
    # Debian package names: lowercase alphanumerics plus '+', '.', '-'.
    # FIX: in the original class [a-z0-9+-.], '+-.' formed a character
    # range that also admitted ','; putting '-' last makes it literal.
    # Also anchor the end so trailing invalid characters are rejected.
    pkgname_re = '^[a-z][a-z0-9+.-]+$'
    assert re.match(pkgname_re, params['package_name']), (
        'Invalid package name: %s. Must match %s' % (
            params['package_name'], pkgname_re))
python
{ "resource": "" }
q44512
LineDiscountItem.value
train
def value(self):
    """Return the positive amount to subtract from the line total.

    A zero flat rate signals a percentage-based discount on the line
    item's total price; otherwise the flat rate itself is the discount.
    """
    base_price = self.lineItem.totalPrice
    return base_price * self.percent if self.flatRate == 0 else self.flatRate
python
{ "resource": "" }
q44513
voxel_count
train
def voxel_count(dset, p=None, positive_only=False, mask=None, ROI=None):
    ''' returns the number of non-zero voxels

    :p:             threshold the dataset at the given *p*-value, then count
    :positive_only: only count positive values
    :mask:          count within the given mask
    :ROI:           only use the ROI with the given value (or list of
                    values) within the mask; if ROI is 'all' then return
                    the voxel count of each ROI as a dictionary
    '''
    if p:
        dset = nl.thresh(dset, p, positive_only)
    elif positive_only:
        dset = nl.calc(dset, 'step(a)')

    count = 0
    # FIX: initialize before the branch; previously this was only bound on
    # the mask path, so the no-mask path hit an unbound local at the final
    # `if count_dict` check.
    count_dict = None
    with open(os.devnull, "w") as devnull:
        if mask:
            cmd = ['3dROIstats', '-1Dformat', '-nomeanout', '-nobriklab',
                   '-nzvoxels', '-mask', str(mask), str(dset)]
            # FIX: decode so this works on Python 3, where check_output
            # returns bytes and .split('\n') would raise TypeError.
            out = subprocess.check_output(
                cmd, stderr=devnull).decode().split('\n')
            if len(out) < 4:
                return 0
            # row 1 holds "NZcount_<roi>" labels; row 3 the matching counts
            rois = [int(x.replace('NZcount_', ''))
                    for x in out[1].strip()[1:].split()]
            counts = [int(x.replace('NZcount_', ''))
                      for x in out[3].strip().split()]
            if ROI is None:
                ROI = rois
            if ROI == 'all':
                count_dict = {}
                ROI = rois
            elif not isinstance(ROI, list):
                ROI = [ROI]
            for r in ROI:
                if r in rois:
                    roi_count = counts[rois.index(r)]
                    if count_dict is not None:
                        count_dict[r] = roi_count
                    else:
                        count += roi_count
        else:
            cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
            count = int(subprocess.check_output(cmd, stderr=devnull).strip())
    if count_dict is not None:
        return count_dict
    return count
python
{ "resource": "" }
q44514
mask_average
train
def mask_average(dset, mask):
    '''Return the mean of ``dset`` voxels lying within the non-zero voxels
    of ``mask`` (None if 3dmaskave produced no output).'''
    result = nl.run(['3dmaskave', '-q', '-mask', mask, dset])
    if not result:
        return None
    # the mean is the last whitespace-separated token of the output
    return float(result.output.split()[-1])
python
{ "resource": "" }
q44515
FreeIPAUser.is_member_of
train
def is_member_of(self, group_name):
    """Return True if this user is a member of the named LDAP group,
    otherwise False (comparison is case-insensitive)."""
    group_dn = 'cn=%s,cn=groups,cn=accounts,%s' % (group_name,
                                                   self._base_dn)
    member_dns = [str(dn).lower() for dn in self.member_of]
    return str(group_dn).lower() in member_dns
python
{ "resource": "" }
q44516
_commonPrefetchDeclarativeIds
train
def _commonPrefetchDeclarativeIds(engine, mutex, Declarative,
                                  count) -> Optional[Iterable[int]]:
    """ Common Prefetch Declarative IDs

    This function is used by the worker and server.

    Atomically advances the table's ID sequence by ``count`` while
    holding ``mutex`` and returns an iterator over the reserved range,
    or None when ``count`` is falsy.
    """
    if not count:
        logger.debug("Count was zero, no range returned")
        return

    conn = engine.connect()
    transaction = conn.begin()
    mutex.acquire()
    try:
        sequence = Sequence('%s_id_seq' % Declarative.__tablename__,
                            schema=Declarative.metadata.schema)

        if isPostGreSQLDialect(engine):
            # setval() bumps the sequence by `count` in one statement and
            # returns the new top value. Identifiers are %-interpolated
            # into the SQL; acceptable only because they come from model
            # metadata, never from user input.
            sql = "SELECT setval('%(seq)s', (select nextval('%(seq)s') + %(add)s), true)"
            sql %= {
                'seq': '"%s"."%s"' % (sequence.schema, sequence.name),
                'add': count
            }
            nextStartId = conn.execute(sql).fetchone()[0]
            startId = nextStartId - count

        elif isMssqlDialect(engine):
            # MSSQL has no setval(): take one value, then restart the
            # sequence past the reserved block (not atomic w.r.t. other
            # connections -- hence the mutex).
            startId = conn.execute(
                'SELECT NEXT VALUE FOR "%s"."%s"'
                % (sequence.schema, sequence.name)
            ).fetchone()[0] + 1
            nextStartId = startId + count
            conn.execute('alter sequence "%s"."%s" restart with %s'
                         % (sequence.schema, sequence.name, nextStartId))

        else:
            raise NotImplementedError()

        transaction.commit()
        return iter(range(startId, nextStartId))

    finally:
        mutex.release()
        conn.close()
python
{ "resource": "" }
q44517
DbConnection.ormSessionCreator
train
def ormSessionCreator(self) -> DbSessionCreator:
    """ Get Orm Session

    Lazily builds the engine and scoped session factory on first use and
    caches it for subsequent calls.

    :return: A SQLAlchemy session scoped for the caller's thread.
    """
    assert self._dbConnectString
    # reuse the factory once it has been built
    if self._ScopedSession:
        return self._ScopedSession
    self._dbEngine = create_engine(self._dbConnectString,
                                   **self._dbEngineArgs)
    session_factory = sessionmaker(bind=self._dbEngine)
    self._ScopedSession = scoped_session(session_factory)
    return self._ScopedSession
python
{ "resource": "" }
q44518
DbConnection.checkForeignKeys
train
def checkForeignKeys(self, engine: Engine) -> None:
    """ Check Foreign Keys

    Log any foreign keys that don't have indexes assigned to them.
    This is a performance issue.
    """
    missing = (sqlalchemy_utils.functions
               .non_indexed_foreign_keys(self._metadata, engine=engine))
    # Warn (never fail) once per un-indexed foreign-key column set.
    for table, keys in missing.items():
        for key in keys:
            logger.warning("Missing index on ForeignKey %s" % key.columns)
python
{ "resource": "" }
q44519
temporary_tag
train
def temporary_tag(tag):
    """ Temporarily tags the repo

    Generator intended for use as a context manager (presumably wrapped
    with ``contextlib.contextmanager`` at the decoration site -- confirm):
    applies ``tag`` on entry and always removes it on exit, even if the
    body raises. A falsy ``tag`` makes both steps no-ops.
    """
    if tag:
        CTX.repo.tag(tag)
    try:
        yield
    finally:
        if tag:
            CTX.repo.remove_tag(tag)
python
{ "resource": "" }
q44520
savejson
train
def savejson(filename, datadict):
    """Save data from a dictionary in JSON format.

    Numpy arrays are converted to lists down to the second level of the
    dictionary. The caller's dictionary is left untouched (the previous
    implementation mutated it in place).
    """
    def _convert(value):
        # isinstance also covers ndarray subclasses, unlike type() == ...
        return value.tolist() if isinstance(value, np.ndarray) else value

    serializable = {}
    for key, value in datadict.items():
        if isinstance(value, dict):
            serializable[key] = {key2: _convert(value2)
                                 for key2, value2 in value.items()}
        else:
            serializable[key] = _convert(value)
    with open(filename, "w") as f:
        f.write(json.dumps(serializable, indent=4))
python
{ "resource": "" }
q44521
loadjson
train
def loadjson(filename, asnparrays=False):
    """Load data from a text file in JSON format.

    With ``asnparrays=True``, lists are converted to numpy arrays down to
    the second level of the dictionary. Returns a single dict.
    """
    with open(filename) as f:
        data = json.load(f)
    if not asnparrays:
        return data
    for key, value in data.items():
        if type(value) is list:
            data[key] = np.asarray(value)
        elif type(value) is dict:
            for key2, value2 in value.items():
                if type(value2) is list:
                    data[key][key2] = np.asarray(value2)
    return data
python
{ "resource": "" }
q44522
savecsv
train
def savecsv(filename, datadict, mode="w"):
    """Save a dictionary of column data to CSV.

    The header row is written only when not appending (``mode != "a"``).
    """
    write_header = mode != "a"
    with open(filename, mode) as f:
        _pd.DataFrame(datadict).to_csv(f, index=False, header=write_header)
python
{ "resource": "" }
q44523
loadcsv
train
def loadcsv(filename):
    """Load data from a CSV file.

    Returns a single dict with column names as keys and the columns'
    numpy values as values.
    """
    frame = _pd.read_csv(filename)
    return {name: column.values for name, column in frame.items()}
python
{ "resource": "" }
q44524
save_hdf_metadata
train
def save_hdf_metadata(filename, metadata, groupname="data", mode="a"):
    """Save a dictionary of metadata to an HDF5 group's attrs."""
    with _h5py.File(filename, mode) as f:
        group_attrs = f[groupname].attrs
        for key, val in metadata.items():
            group_attrs[key] = val
python
{ "resource": "" }
q44525
load_hdf_metadata
train
def load_hdf_metadata(filename, groupname="data"):
    """Load the attrs of the desired HDF5 group into a dictionary."""
    with _h5py.File(filename, "r") as f:
        # materialize before the file closes
        return dict(f[groupname].attrs)
python
{ "resource": "" }
q44526
FuncArgParser.get_param_doc
train
def get_param_doc(doc, param):
    """Get the documentation and datatype for a parameter

    This function returns the documentation and the argument for a
    napoleon like structured docstring `doc`

    Parameters
    ----------
    doc: str
        The base docstring to use
    param: str
        The argument to use

    Returns
    -------
    str
        The documentation of the given `param`
    str
        The datatype of the given `param` (None when not declared)"""
    # prefer the full parameter entry; fall back to the type-only entry
    arg_doc = docstrings.keep_params_s(doc, [param]) or \
        docstrings.keep_types_s(doc, [param])
    dtype = None
    if arg_doc:
        lines = arg_doc.splitlines()
        # body is everything after the "name: type" header line
        arg_doc = dedents('\n' + '\n'.join(lines[1:]))
        param_desc = lines[0].split(':', 1)
        if len(param_desc) > 1:
            dtype = param_desc[1].strip()
    return arg_doc, dtype
python
{ "resource": "" }
q44527
FuncArgParser.setup_args
train
def setup_args(self, func=None, setup_as=None, insert_at=None,
               interprete=True, epilog_sections=None, overwrite=False,
               append_epilog=True):
    """
    Add the parameters from the given `func` to the parameter settings

    Parameters
    ----------
    func: function
        The function to use. If None, a function will be returned that
        can be used as a decorator
    setup_as: str
        The attribute that shall be assigned to the function in the
        resulting namespace. If specified, this function will be used
        when calling the :meth:`parse2func` method
    insert_at: int
        The position where the given `func` should be inserted. If None,
        it will be appended at the end and used when calling the
        :meth:`parse2func` method
    interprete: bool
        If True (default), the docstrings are interpreted and switches
        and lists are automatically inserted (see the
        [interpretation-docs]_)
    epilog_sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog and the existing
        description of the parser
    append_epilog: bool
        If True, append to the existing epilog

    Returns
    -------
    function
        Either the function that can be used as a decorator (if `func`
        is ``None``), or the given `func` itself

    References
    ----------
    .. [interpretation-docs]
       http://funcargparse.readthedocs.io/en/latest/docstring_interpretation.html
    """
    def setup(func):
        # insert the function
        if insert_at is None:
            self._used_functions.append(func)
        else:
            self._used_functions.insert(insert_at, func)
        args_dict = self.unfinished_arguments
        # save the function to use in parse2funcs
        if setup_as:
            args_dict[setup_as] = dict(
                long=setup_as, default=func, help=argparse.SUPPRESS)
            self._setup_as = setup_as
        # create arguments
        # FIX: inspect.getargspec was removed in Python 3.11;
        # getfullargspec exposes the same leading fields.
        argspec = inspect.getfullargspec(func)
        args, varargs, varkw, defaults = (
            argspec.args, argspec.varargs, argspec.varkw, argspec.defaults)
        full_doc = docstrings.dedents(inspect.getdoc(func))
        summary = docstrings.get_full_description(full_doc)
        if summary:
            if not self.description or overwrite:
                self.description = summary
            full_doc = docstrings._remove_summary(full_doc)
        self.extract_as_epilog(full_doc, epilog_sections, overwrite,
                               append_epilog)
        doc = docstrings._get_section(full_doc, 'Parameters') + '\n'
        doc += docstrings._get_section(full_doc, 'Other Parameters')
        doc = doc.rstrip()
        # index of the first argument that carries a default value
        default_min = len(args or []) - len(defaults or [])
        for i, arg in enumerate(args):
            if arg == 'self' or arg in args_dict:
                continue
            arg_doc, dtype = self.get_param_doc(doc, arg)
            args_dict[arg] = d = {'dest': arg,
                                  'short': arg.replace('_', '-'),
                                  'long': arg.replace('_', '-')}
            if arg_doc:
                d['help'] = arg_doc
            if i >= default_min:
                d['default'] = defaults[i - default_min]
            else:
                # arguments without defaults become positional
                d['positional'] = True
            if interprete and dtype == 'bool' and 'default' in d:
                # booleans become store_true/store_false switches
                d['action'] = 'store_false' if d['default'] else \
                    'store_true'
            elif interprete and dtype:
                if dtype.startswith('list of'):
                    d['nargs'] = '+'
                    dtype = dtype[7:].strip()
                if dtype in ['str', 'string', 'strings']:
                    d['type'] = six.text_type
                    if dtype == 'strings':
                        dtype = 'string'
                else:
                    try:
                        d['type'] = getattr(builtins, dtype)
                    except AttributeError:
                        try:
                            # maybe the dtype has a final 's'
                            d['type'] = getattr(builtins, dtype[:-1])
                            dtype = dtype[:-1]
                        except AttributeError:
                            pass
                d['metavar'] = dtype
        return func
    if func is None:
        return setup
    else:
        return setup(func)
python
{ "resource": "" }
q44528
FuncArgParser.add_subparsers
train
def add_subparsers(self, *args, **kwargs):
    """ Add subparsers to this parser

    Parameters
    ----------
    ``*args, **kwargs``
        As specified by the original
        :meth:`argparse.ArgumentParser.add_subparsers` method
    chain: bool
        Default: False. If True, it is enabled to chain subparsers"""
    # `chain` is our extension; strip it before delegating to argparse
    chain = kwargs.pop('chain', None)
    action = super(FuncArgParser, self).add_subparsers(*args, **kwargs)
    if chain:
        self._chain_subparsers = True
    self._subparsers_action = action
    return action
python
{ "resource": "" }
q44529
FuncArgParser.setup_subparser
train
def setup_subparser(
        self, func=None, setup_as=None, insert_at=None, interprete=True,
        epilog_sections=None, overwrite=False, append_epilog=True,
        return_parser=False, name=None, **kwargs):
    """
    Create a subparser with the name of the given function

    Parameters are the same as for the :meth:`setup_args` function, other
    parameters are parsed to the :meth:`add_subparsers` method if (and
    only if) this method has not already been called.

    Parameters
    ----------
    %(FuncArgParser.setup_args.parameters)s
    return_parser: bool
        If True, the created parser is returned instead of the function
    name: str
        The name of the created parser. If None, the function name is
        used and underscores (``'_'``) are replaced by minus (``'-'``)
    ``**kwargs``
        Any other parameter that is passed to the add_parser method that
        creates the parser

    Returns
    -------
    FuncArgParser or %(FuncArgParser.setup_args.returns)s
        If return_parser is True, the created subparser is returned

    Examples
    --------
    Use this method as a decorator::

        >>> from funcargparser import FuncArgParser
        >>> parser = FuncArgParser()
        >>> @parser.setup_subparser
        ... def my_func(my_argument=None):
        ...     pass
        >>> args = parser.parse_args('my-func -my-argument 1'.split())
    """
    def setup(func):
        if self._subparsers_action is None:
            raise RuntimeError(
                "No subparsers have yet been created! Run the "
                "add_subparsers method first!")
        # replace underscore by '-'
        name2use = name
        if name2use is None:
            name2use = func.__name__.replace('_', '-')
        # default the subcommand help line to the function's summary
        kwargs.setdefault('help', docstrings.get_summary(
            docstrings.dedents(inspect.getdoc(func))))
        parser = self._subparsers_action.add_parser(name2use, **kwargs)
        parser.setup_args(
            func, setup_as=setup_as, insert_at=insert_at,
            interprete=interprete, epilog_sections=epilog_sections,
            overwrite=overwrite, append_epilog=append_epilog)
        return func, parser
    if func is None:
        # parameterized decorator usage: keep returning the function
        return lambda f: setup(f)[0]
    else:
        return setup(func)[int(return_parser)]
python
{ "resource": "" }
q44530
FuncArgParser.update_arg
train
def update_arg(self, arg, if_existent=None, **kwargs):
    """ Update the `add_argument` data for the given parameter

    Parameters
    ----------
    arg: str
        The name of the function argument
    if_existent: bool or None
        If True, the argument is updated. If None (default), the
        argument is only updated if it exists. Otherwise, if False, the
        given ``**kwargs`` are only used if the argument is not yet
        existing
    ``**kwargs``
        Any keyword argument for the
        :meth:`argparse.ArgumentParser.add_argument` method
    """
    exists = arg in self.unfinished_arguments
    if if_existent or (if_existent is None and exists):
        self.unfinished_arguments[arg].update(kwargs)
    elif if_existent is not None and not if_existent:
        # only seed defaults; never clobber an existing entry
        self.unfinished_arguments.setdefault(arg, kwargs)
python
{ "resource": "" }
q44531
FuncArgParser._get_corresponding_parsers
train
def _get_corresponding_parsers(self, func):
    """Get the parser that has been set up by the given `function`

    Depth-first generator: yields this parser when `func` was registered
    here, then recurses into every subparser.
    """
    if func in self._used_functions:
        yield self
    if self._subparsers_action is not None:
        for parser in self._subparsers_action.choices.values():
            for sp in parser._get_corresponding_parsers(func):
                yield sp
python
{ "resource": "" }
q44532
FuncArgParser.pop_key
train
def pop_key(self, arg, key, *args, **kwargs):
    """Remove and return a previously defined key for `add_argument`.

    Extra positional/keyword arguments are forwarded to
    :meth:`dict.pop` (i.e. an optional default value).
    """
    settings = self.unfinished_arguments[arg]
    return settings.pop(key, *args, **kwargs)
python
{ "resource": "" }
q44533
FuncArgParser.create_arguments
train
def create_arguments(self, subparsers=False):
    """Create and add the arguments

    Converts every entry of :attr:`unfinished_arguments` into a real
    ``add_argument`` call and marks the parser as finalized. May only be
    called once.

    Parameters
    ----------
    subparsers: bool
        If True, the arguments of the subparsers are also created

    Raises
    ------
    ValueError
        If this parser has already been finalized, or an entry has
        neither a short nor a long flag"""
    ret = []
    if not self._finalized:
        for arg, d in self.unfinished_arguments.items():
            try:
                # 1 for optionals ('-'/'--' prefixes), 0 for positionals
                not_positional = int(not d.pop('positional', False))
                short = d.pop('short', None)
                long_name = d.pop('long', None)
                if short is None and long_name is None:
                    raise ValueError(
                        "Either a short (-) or a long (--) argument must "
                        "be provided!")
                if not not_positional:
                    # positionals take the bare name; flags/dest dropped
                    short = arg
                    long_name = None
                    d.pop('dest', None)
                if short == long_name:
                    long_name = None
                args = []
                if short:
                    args.append('-' * not_positional + short)
                if long_name:
                    args.append('--' * not_positional + long_name)
                # an entry may target an argument group instead of self
                group = d.pop('group', self)
                if d.get('action') in ['store_true', 'store_false']:
                    # switches take no value, so metavar is meaningless
                    d.pop('metavar', None)
                ret.append(group.add_argument(*args, **d))
            except Exception:
                print('Error while creating argument %s' % arg)
                raise
    else:
        raise ValueError('Parser has already been finalized!')
    self._finalized = True
    if subparsers and self._subparsers_action is not None:
        for parser in self._subparsers_action.choices.values():
            parser.create_arguments(True)
    return ret
python
{ "resource": "" }
q44534
FuncArgParser.format_epilog_section
train
def format_epilog_section(self, section, text):
    """Format an epilog section by applying the configured formatter.

    ``epilog_formatter`` is either a key into ``_epilog_formatters`` or
    a formatting callable itself.
    """
    formatter = self.epilog_formatter
    try:
        formatter = self._epilog_formatters[formatter]
    except KeyError:
        # not a registered name: it must itself be a callable
        if not callable(formatter):
            raise
    return formatter(section, text)
python
{ "resource": "" }
q44535
FuncArgParser.extract_as_epilog
train
def extract_as_epilog(self, text, sections=None, overwrite=False,
                      append=True):
    """Extract epilog sections from the a docstring

    Parameters
    ----------
    text
        The docstring to use
    sections: list of str
        The headers of the sections to extract. If None, the
        :attr:`epilog_sections` attribute is used
    overwrite: bool
        If True, overwrite the existing epilog
    append: bool
        If True, append to the existing epilog"""
    if sections is None:
        sections = self.epilog_sections
    if ((not self.epilog or overwrite or append) and sections):
        epilog_parts = []
        for sec in sections:
            # NOTE(review): `text` is reassigned each iteration, so the
            # second and later sections are searched inside the previous
            # section's text rather than the original docstring --
            # confirm this is intentional.
            text = docstrings._get_section(text, sec).strip()
            if text:
                epilog_parts.append(
                    self.format_epilog_section(sec, text))
        if epilog_parts:
            epilog = '\n\n'.join(epilog_parts)
            if overwrite or not self.epilog:
                self.epilog = epilog
            else:
                self.epilog += '\n\n' + epilog
python
{ "resource": "" }
q44536
FuncArgParser.grouparg
train
def grouparg(self, arg, my_arg=None, parent_cmds=[]):
    """ Grouper function for chaining subcommands

    Parameters
    ----------
    arg: str
        The current command line argument that is parsed
    my_arg: str
        The name of this subparser. If None, this parser is the main
        parser and has no parent parser
    parent_cmds: list of str
        The available commands of the parent parsers

    Returns
    -------
    str or None
        The grouping key for the given `arg` or None if the key does not
        correspond to this parser or this parser is the main parser and
        does not have seen a subparser yet

    Notes
    -----
    Quite complicated, there is no real need to deal with this function
    """
    if self._subparsers_action is None:
        return None
    commands = self._subparsers_action.choices
    # __currentarg tracks the subcommand this parser is currently inside
    currentarg = self.__currentarg
    # the default return value is the current argument we are in or the
    # name of the subparser itself
    ret = currentarg or my_arg
    if currentarg is not None:
        # if we are already in a sub command, we use the sub parser
        sp_key = commands[currentarg].grouparg(arg, currentarg, chain(
            commands, parent_cmds))
        if sp_key is None and arg in commands:
            # if the subparser did not recognize the command, we use the
            # command that corresponds to this parser or (if this parser
            # is the parent parser) the current subparser
            self.__currentarg = currentarg = arg
            ret = my_arg or currentarg
        elif sp_key not in commands and arg in parent_cmds:
            # otherwise, if the subparser recognizes the command but it
            # is not in the known commands of this parser, it must be
            # another command of the subparser and this parser can
            # ignore it
            ret = None
        else:
            # otherwise the command belongs to this subparser (if this
            # one is not the subparser) or the current subparser
            ret = my_arg or currentarg
    elif arg in commands:
        # if the argument is a valid subparser, we return this one
        self.__currentarg = arg
        ret = arg
    elif arg in parent_cmds:
        # if the argument is not a valid subparser but in one of our
        # parents, we return None to signalize that we cannot categorize
        # it
        ret = None
    return ret
python
{ "resource": "" }
q44537
FuncArgParser.__parse_main
train
def __parse_main(self, args):
    """Parse the main arguments only.

    This is a work around for python 2.7 because argparse does not allow
    to parse arguments without subparsers.
    """
    if six.PY2:
        # register a throwaway subcommand and append it to the argument
        # list so py2 argparse's "subcommand required" check is satisfied
        self._subparsers_action.add_parser("__dummy")
        return super(FuncArgParser, self).parse_known_args(
            list(args) + ['__dummy'])
    return super(FuncArgParser, self).parse_known_args(args)
python
{ "resource": "" }
q44538
FuncArgParser._parse2subparser_funcs
train
def _parse2subparser_funcs(self, kws):
    """ Recursive function to parse arguments to chained parsers

    If none of this parser's subcommands appears in `kws`, call the
    function registered for this parser with the remaining keywords;
    otherwise recurse into every invoked subparser and collect the
    results in a Namespace.
    """
    choices = getattr(self._subparsers_action, 'choices', {})
    # map python-identifier form ('my_cmd') back to the dashed command
    replaced = {key.replace('-', '_'): key for key in choices}
    sp_commands = set(replaced).intersection(kws)
    if not sp_commands:
        if self._setup_as is not None:
            func = kws.pop(self._setup_as)
        else:
            try:
                func = self._used_functions[-1]
            except IndexError:
                # no function was ever registered for this parser
                return None
        return func(**{
            key: kws[key] for key in set(kws).difference(choices)})
    else:
        ret = {}
        for key in sp_commands:
            ret[key.replace('-', '_')] = \
                choices[replaced[key]]._parse2subparser_funcs(
                    vars(kws[key]))
        return Namespace(**ret)
python
{ "resource": "" }
q44539
FuncArgParser.get_subparser
train
def get_subparser(self, name):
    """ Convenience method to get a certain subparser

    Parameters
    ----------
    name: str
        The name of the subparser

    Returns
    -------
    FuncArgParser
        The subparser corresponding to `name`

    Raises
    ------
    ValueError
        If no subparsers have been defined on this parser
    """
    action = self._subparsers_action
    if action is None:
        raise ValueError("%s has no subparsers defined!" % self)
    return action.choices[name]
python
{ "resource": "" }
q44540
parse_file
train
def parse_file(src): """ find file in config and output to dest dir """ #clear the stack between parses if config.dest_dir == None: dest = src.dir else: dest = config.dest_dir output = get_output(src) output_file = dest + '/' + src.basename + '.min.js' f = open(output_file,'w') f.write(jsmin.jsmin(output)) f.close() print "Wrote combined and minified file to: %s" % (output_file)
python
{ "resource": "" }
q44541
get_output
train
def get_output(src):
    """ parse lines looking for commands

    Recursively expands import/require directives in `src` and returns
    the combined source text. `config.import_regex` must capture a
    `script` group (relative path) and a `command` group ('import' or
    'require').
    """
    output = ''
    # NOTE(review): mode 'rU' is Python-2 era (removed in py3.11) and the
    # handle is never closed -- confirm the target interpreter.
    lines = open(src.path, 'rU').readlines()
    for line in lines:
        m = re.match(config.import_regex,line)
        if m:
            include_path = os.path.abspath(src.dir + '/' + m.group('script'));
            if include_path not in config.sources:
                # first time we see this file: register it and its parent
                script = Script(include_path)
                script.parents.append(src)
                config.sources[script.path] = script
            include_file = config.sources[include_path]
            #require statements dont include if the file has already been included
            if include_file not in config.stack or m.group('command') == 'import':
                config.stack.append(include_file)
                output += get_output(include_file)
        else:
            output += line
    return output
python
{ "resource": "" }
q44542
get_signed_raw_revocation_document
train
def get_signed_raw_revocation_document(identity: Identity, salt: str,
                                       password: str) -> str:
    """
    Generate a signed account revocation document for the given identity

    :param identity: Self Certification of the identity
    :param salt: Salt
    :param password: Password
    :rtype: str
    """
    signing_key = SigningKey.from_credentials(salt, password)
    revocation = Revocation(PROTOCOL_VERSION, identity.currency,
                            identity, "")
    revocation.sign([signing_key])
    return revocation.signed_raw()
python
{ "resource": "" }
q44543
motion_from_params
train
def motion_from_params(param_file, motion_file, individual=True, rms=True):
    """Calculate a motion regressor from the params file given by 3dAllineate.

    Reads the 6 alignment parameters per volume from ``param_file``
    (comment lines starting with ``#`` are skipped) and writes a
    tab-separated motion file to ``motion_file`` containing:

    * the frame-to-frame difference of each of the 6 parameters
      (when ``individual`` is True), and/or
    * the frame-to-frame difference of the summed RMS translation and
      rotation magnitudes (when ``rms`` is True).
    """
    with open(param_file) as inf:
        translate_rotate = np.array([[float(y) for y in x.strip().split()[:6]]
                                     for x in inf.readlines() if x[0] != '#'])
    motion = np.array([])
    if individual:
        # Prepend a zero row so the regressor has one entry per volume.
        motion = np.vstack((np.zeros(translate_rotate.shape[1]),
                            np.diff(translate_rotate, axis=0)))
    if rms:
        # NOTE(review): assumes columns 0-2 are translations and 3-5 are
        # rotations — confirm against the 3dAllineate parameter order.
        translate = [sqrt(sum([x**2 for x in y[:3]])) for y in translate_rotate]
        rotate = [sqrt(sum([x**2 for x in y[3:]])) for y in translate_rotate]
        # list() is required on Python 3, where map() returns an iterator
        # (np.array of a bare map object is a 0-d object array).
        combined = np.array(list(map(add, translate, rotate)))
        translate_rotate_diff = np.hstack(([0], np.diff(combined, axis=0)))
        if motion.shape == (0,):
            # BUG FIX: the original referenced an undefined name
            # ``rms_motion`` here; the RMS difference computed above is what
            # was intended.  Reshape to a column so the writer below works.
            motion = translate_rotate_diff.reshape(-1, 1)
        else:
            motion = np.column_stack((motion, translate_rotate_diff))
    with open(motion_file, 'w') as outf:
        outf.write('\n'.join(['\t'.join([str(y) for y in x]) for x in motion]))
python
{ "resource": "" }
q44544
volreg
train
def volreg(dset, suffix='_volreg', base=3, tshift=3, dfile_suffix='_volreg.1D'):
    """Thin wrapper around AFNI's ``3dvolreg``.

    :suffix: suffix added to ``dset`` for the registered output
    :base: either a number or ``dset[#]`` of the base image to register to
    :tshift: if a number, time-shift ignoring that many images; ``None``
        disables time-shifting
    :dfile_suffix: suffix added to ``dset`` prefix for the motion parameters
    """
    out_dset = nl.suffix(dset, suffix)
    args = ['3dvolreg', '-prefix', out_dset, '-base', base,
            '-dfile', nl.prefix(dset) + dfile_suffix]
    if tshift:
        args += ['-tshift', tshift]
    args.append(dset)
    nl.run(args, products=out_dset)
python
{ "resource": "" }
q44545
affine_align
train
def affine_align(dset_from, dset_to, skull_strip=True, mask=None, suffix='_aff',
                 prefix=None, cost=None, epi=False, resample='wsinc5',
                 grid_size=None, opts=[]):
    ''' interface to 3dAllineate to align anatomies and EPIs

    :skull_strip: ``True`` strips both datasets; passing one of the dataset
        names strips only that one; anything else strips neither
    :mask: exclusion mask passed to ``-emask`` (resampled to the target
        grid first when the target was skull-stripped)
    :prefix: explicit output name; otherwise ``suffix`` is appended to
        ``dset_from``
    '''
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]

    def dset_source(dset):
        # Use the skull-stripped version only when stripping applies.
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = prefix
    if dset_affine is None:
        dset_affine = os.path.split(nl.suffix(dset_from, suffix))[1]
    dset_affine_mat_1D = nl.prefix(dset_affine) + '_matrix.1D'
    dset_affine_par_1D = nl.prefix(dset_affine) + '_params.1D'

    if os.path.exists(dset_affine):
        # final product already exists
        return

    for dset in [dset_from, dset_to]:
        if skull_strip == True or skull_strip == dset:
            nl.skull_strip(dset, '_ns')

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to
        # anatomy, which has now been cropped from the skull stripping. So
        # the mask needs to be resampled to match the stripped target grid
        if skull_strip == True or skull_strip == dset_to:
            # BUG FIX: the original called an undefined helper ``dset_u``
            # here (it only exists in qwarp_align); there is no unifize step
            # in this function, so the resample master is the (possibly
            # stripped) target dataset.
            nl.run(['3dresample', '-master', dset_source(dset_to),
                    '-inset', mask,
                    '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    all_cmd = [
        '3dAllineate',
        '-prefix', dset_affine,
        '-base', dset_source(dset_to),
        '-source', dset_source(dset_from),
        '-source_automask',
        '-1Dmatrix_save', dset_affine_mat_1D,
        '-1Dparam_save', dset_affine_par_1D,
        '-autoweight',
        '-final', resample,
        '-cmass'
    ] + opts
    if grid_size:
        all_cmd += ['-newgrid', grid_size]
    if cost:
        all_cmd += ['-cost', cost]
    if epi:
        all_cmd += ['-EPI']
    if mask:
        all_cmd += ['-emask', mask_use]
    nl.run(all_cmd, products=dset_affine)
python
{ "resource": "" }
q44546
affine_apply
train
def affine_apply(dset_from, affine_1D, master, affine_suffix='_aff', interp='NN',
                 inverse=False, prefix=None):
    '''apply the 1D file from a previously aligned dataset

    Applies the matrix in ``affine_1D`` to ``dset_from`` and makes the final
    grid look like the dataset ``master`` using the interpolation method
    ``interp``. If ``inverse`` is True, will apply the inverse of
    ``affine_1D`` instead'''
    matrix_file = affine_1D
    if inverse:
        # cat_matvec with "-I" emits the inverted matrix; stash it in a temp
        # file so 3dAllineate can read it from disk.
        with tempfile.NamedTemporaryFile(delete=False) as temp:
            temp.write(subprocess.check_output(['cat_matvec', affine_1D, '-I']))
        matrix_file = temp.name
    out_prefix = prefix if prefix is not None else nl.suffix(dset_from, affine_suffix)
    nl.run(['3dAllineate', '-1Dmatrix_apply', matrix_file, '-input', dset_from,
            '-prefix', out_prefix, '-master', master, '-final', interp],
           products=out_prefix)
python
{ "resource": "" }
q44547
qwarp_align
train
def qwarp_align(dset_from, dset_to, skull_strip=True, mask=None,
                affine_suffix='_aff', suffix='_qwarp', prefix=None):
    '''aligns ``dset_from`` to ``dset_to`` using 3dQwarp

    Runs ``3dSkullStrip`` (controlled by ``skull_strip``), ``3dUnifize``,
    ``3dAllineate`` (via :func:`affine_align`), and finally ``3dQwarp``.
    Intermediate files reuse suffixes (``_ns``, ``_u``, ``affine_suffix``)
    and are kept as-is if they already exist.  The warp parameters are
    written alongside the output with an extra ``_WARP`` suffix, and
    ``prefix`` overrides the ``suffix``-derived output name.

    The ``mask`` is always applied in ``dset_to`` space (reverse and
    re-apply the transform via :meth:`qwarp_invert`/:meth:`qwarp_apply`
    if you need the other direction).  If ``skull_strip`` is a string
    matching one of the datasets, only that dataset is stripped.

    # TODO: currently does not work with +tlrc datasets because the
    # filenames get mangled
    '''
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]
    dset_u = lambda dset: os.path.split(nl.suffix(dset, '_u'))[1]

    def dset_source(dset):
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = os.path.split(nl.suffix(dset_from, affine_suffix))[1]
    dset_affine_1D = nl.prefix(dset_affine) + '.1D'
    dset_qwarp = prefix
    if dset_qwarp is None:
        dset_qwarp = os.path.split(nl.suffix(dset_from, suffix))[1]

    if os.path.exists(dset_qwarp):
        # final product already exists
        return

    affine_align(dset_from, dset_to, skull_strip, mask, affine_suffix)

    for dset in [dset_from, dset_to]:
        nl.run([
            '3dUnifize',
            '-prefix', dset_u(dset_source(dset)),
            '-input', dset_source(dset)
        ], products=[dset_u(dset_source(dset))])

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to
        # anatomy, which has now been cropped from the skull stripping. So
        # the lesion mask needs to be resampled to match
        if skull_strip == True or skull_strip == dset_to:
            nl.run(['3dresample', '-master', dset_u(dset_ss(dset)),
                    '-inset', mask,
                    '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    warp_cmd = [
        '3dQwarp',
        '-prefix', dset_qwarp,
        '-duplo', '-useweight', '-blur', '0', '3',
        '-iwarp',
        '-base', dset_u(dset_source(dset_to)),
        '-source', dset_affine
    ]
    if mask:
        warp_cmd += ['-emask', mask_use]
    nl.run(warp_cmd, products=dset_qwarp)
python
{ "resource": "" }
q44548
qwarp_apply
train
def qwarp_apply(dset_from, dset_warp, affine=None, warp_suffix='_warp',
                master='WARP', interp=None, prefix=None):
    '''applies the transform from a previous qwarp

    Uses the warp parameters in ``dset_warp`` (usually a name ending in
    ``_WARP``) on ``dset_from``.  An optional affine ``.1D`` file given in
    ``affine`` is applied simultaneously with the qwarp.  ``interp`` selects
    the interpolation method (the 3dNwarpApply default, currently wsinc5,
    otherwise).  Output is ``dset_from`` + ``warp_suffix`` unless ``prefix``
    is given.
    '''
    out_dset = prefix
    if out_dset is None:
        out_dset = os.path.split(nl.suffix(dset_from, warp_suffix))[1]
    dset_from_info = nl.dset_info(dset_from)
    dset_warp_info = nl.dset_info(dset_warp)
    if dset_from_info.orient != dset_warp_info.orient:
        # If the datasets have different orientations, the transform won't
        # be applied correctly — reorient the source first.
        nl.run(['3dresample', '-orient', dset_warp_info.orient,
                '-prefix', nl.suffix(dset_from, '_reorient'),
                '-inset', dset_from],
               products=nl.suffix(dset_from, '_reorient'))
        dset_from = nl.suffix(dset_from, '_reorient')
    warp_opt = str(dset_warp)
    if affine:
        warp_opt += ' ' + affine
    cmd = ['3dNwarpApply', '-nwarp', warp_opt,
           '-source', dset_from,
           '-master', master,
           '-prefix', out_dset]
    if interp:
        cmd += ['-interp', interp]
    nl.run(cmd, products=out_dset)
python
{ "resource": "" }
q44549
qwarp_epi
train
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty
    impressive alignment for EPI time-series with significant distortions
    due to motion'''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    # Per-volume temp file name helper.
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick), expr='a',
                prefix=align_dset, datum='float')
        for i in range(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i), expr='a',
                        prefix=dset_sub(i), datum='float')
                nl.run([
                    '3dQwarp', '-nowarp', '-workhard', '-superhard',
                    '-minpatch', '9', '-blur', '0', '-pear', '-nopenalty',
                    '-base', align_dset,
                    '-source', dset_sub(i),
                    '-prefix', nl.suffix(dset_sub(i), '_warp')
                ], quiet=True)
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in range(info.reps)]
        nl.run(cmd, quiet=True)
    except Exception as e:
        raise e
    finally:
        # Best-effort cleanup of the per-volume temp files.
        for i in range(info.reps):
            for temp_suffix in ['', 'warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), temp_suffix))
                except:
                    pass
python
{ "resource": "" }
q44550
align_epi_anat
train
def align_epi_anat(anatomy, epi_dsets, skull_strip_anat=True):
    ''' aligns epis to anatomy using ``align_epi_anat.py`` script

    :epi_dsets: a string or list of strings naming the EPI child datasets
    :skull_strip_anat: when ``True``, ``anatomy`` is skull-stripped first
        using the default method

    The default output suffix is "_al"
    '''
    if isinstance(epi_dsets, basestring):
        epi_dsets = [epi_dsets]

    if len(epi_dsets) == 0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' % anatomy,
                  level=nl.level.warning)
        return

    # Nothing to do if every aligned output already exists.
    if all(os.path.exists(nl.suffix(x, '_al')) for x in epi_dsets):
        return

    anatomy_use = anatomy
    if skull_strip_anat:
        nl.skull_strip(anatomy, '_ns')
        anatomy_use = nl.suffix(anatomy, '_ns')

    inputs = [anatomy_use] + epi_dsets
    dset_products = lambda dset: [nl.suffix(dset, '_al'),
                                  nl.prefix(dset) + '_al_mat.aff12.1D',
                                  nl.prefix(dset) + '_tsh_vr_motion.1D']
    products = nl.flatten([dset_products(dset) for dset in epi_dsets])
    with nl.run_in_tmp(inputs, products):
        # align_epi_anat.py works with AFNI-format files; convert NIfTIs.
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = [nl.afni_copy(d) if nl.is_nifti(d) else d
                         for d in epi_dsets]
        cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no",
               "-epi_strip", "3dAutomask", "-anat", anatomy_use,
               "-epi_base", "5", "-epi", epi_dsets_use[0]]
        if len(epi_dsets_use) > 1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
        out = nl.run(cmd)
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                # Convert outputs back to (gzipped) NIfTI to match inputs.
                dset_nifti = nl.nifti_copy(nl.prefix(dset) + '_al+orig')
                if dset_nifti and os.path.exists(dset_nifti) and dset_nifti.endswith('.nii') and dset.endswith('.gz'):
                    nl.run(['gzip', dset_nifti])
python
{ "resource": "" }
q44551
skullstrip_template
train
def skullstrip_template(dset, template, prefix=None, suffix=None, dilate=0):
    '''Skull-strip the raw anatomy ``dset`` by aligning it to a template
    brain and applying a template-derived strip.

    Should produce fairly reliable skullstrips as long as there is a decent
    amount of normal brain and the overall shape of the brain is
    normal-ish.'''
    if suffix == None:
        suffix = '_sstemplate'
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    if os.path.exists(prefix):
        return
    with nl.notify('Running template-based skull-strip on %s' % dset):
        dset = os.path.abspath(dset)
        template = os.path.abspath(template)
        tmp_dir = tempfile.mkdtemp()
        cwd = os.getcwd()
        with nl.run_in(tmp_dir):
            # Affine then non-linear warp of the template onto the anatomy.
            nl.affine_align(template, dset, skull_strip=None, cost='mi',
                            opts=['-nmatch', '100%'])
            nl.run(['3dQwarp', '-minpatch', '20', '-penfac', '10',
                    '-noweight',
                    '-source', nl.suffix(template, '_aff'),
                    '-base', dset,
                    '-prefix', nl.suffix(template, '_qwarp')],
                   products=nl.suffix(template, '_qwarp'))
            info = nl.dset_info(nl.suffix(template, '_qwarp'))
            max_value = info.subbricks[0]['max']
            # Keep anatomy voxels where the warped template exceeds 5% of
            # its peak value.
            nl.calc([dset, nl.suffix(template, '_qwarp')],
                    'a*step(b-%f*0.05)' % max_value, prefix)
            shutil.move(prefix, cwd)
        shutil.rmtree(tmp_dir)
python
{ "resource": "" }
q44552
printmp
train
def printmp(msg):
    """Print *msg* temporarily, until the next print overwrites it.

    Pads the message out to 80 columns and ends with a carriage return so
    the cursor goes back to the start of the line.
    """
    padding = ' ' * (80 - len(msg))
    print(msg + padding, end='\r')
    sys.stdout.flush()
python
{ "resource": "" }
q44553
contacts
train
def contacts(github, logins):
    """Extract public contact info from users.

    Returns a pair of sets ``(mails, blogs)``; each entry is the user's
    display name (falling back to their login) plus their email address or
    blog URL.  Users with neither an email nor a blog are skipped.
    """
    printmp('Fetching contacts')
    users = [github.user(login).as_dict() for login in logins]
    mails = set()
    blogs = set()
    for user in users:
        # BUG FIX: the original used user.get('name', 'login'), which falls
        # back to the literal string 'login' (and keeps None when the API
        # returns an explicit null name, crashing the += below).  Fall back
        # to the user's actual login instead.
        contact = user.get('name') or user['login']
        if user['email']:
            contact += ' <%s>' % user['email']
            mails.add(contact)
        elif user['blog']:
            contact += ' <%s>' % user['blog']
            blogs.add(contact)
        else:
            continue
    return mails, blogs
python
{ "resource": "" }
q44554
extract_mail
train
def extract_mail(issues):
    """Extract mails that sometimes leak from issue comments.

    Scans every comment body of every issue against ``MAIL_REGEX``,
    filtering out protocol-relative URL fragments and github.com addresses.
    """
    found = set()
    for idx, issue in enumerate(issues):
        printmp('Fetching issue #%s' % idx)
        for comment in issue.comments():
            body = comment.as_dict()['body']
            for match in re.findall(MAIL_REGEX, body):
                candidate = match[0]
                # Skip '//...' URL fragments and github noreply addresses.
                if candidate.startswith('//') or candidate.endswith('github.com'):
                    continue
                if '@' in candidate:
                    found.add(candidate)
    return found
python
{ "resource": "" }
q44555
fetch_logins
train
def fetch_logins(roles, repo):
    """Fetch logins for users with given roles.

    ``roles`` may contain 'stargazer', 'collaborator' and/or 'issue'; the
    union of the matching user sets is returned.
    """
    users = set()
    if 'stargazer' in roles:
        printmp('Fetching stargazers')
        users.update(repo.stargazers())
    if 'collaborator' in roles:
        printmp('Fetching collaborators')
        users.update(repo.collaborators())
    if 'issue' in roles:
        printmp('Fetching issues creators')
        users.update(i.user for i in repo.issues(state='all'))
    return users
python
{ "resource": "" }
q44556
high_cli
train
def high_cli(repo_name, login, with_blog, as_list, role):
    """Extract mails from stargazers, collaborators and people involved
    with issues of given repository.
    """
    passw = getpass.getpass()
    github = gh_login(login, passw)
    repo = github.repository(login, repo_name)
    wanted_roles = [ROLES[k] for k in role]
    users = fetch_logins(wanted_roles, repo)
    mails, blogs = contacts(github, users)
    if 'issue' in wanted_roles:
        # Issue comments sometimes leak additional addresses.
        mails |= extract_mail(repo.issues(state='all'))
    # Print results
    separator = ', ' if as_list else '\n'
    print(separator.join(mails))
    if with_blog:
        print(separator.join(blogs))
python
{ "resource": "" }
q44557
HeraldBot.__callback
train
def __callback(self, data):
    """
    Safely calls back a method

    :param data: Associated stanza
    """
    handler = self.__cb_message
    if handler is None:
        return
    try:
        handler(data)
    except Exception as ex:
        # Never let a callback error propagate into the XMPP layer.
        _logger.exception("Error calling method: %s", ex)
python
{ "resource": "" }
q44558
HeraldBot.__on_message
train
def __on_message(self, msg):
    """
    XMPP message received
    """
    msgtype = msg['type']
    msgfrom = msg['from']
    if msgtype == 'groupchat':
        # MUC Room chat: drop our own (loopback) messages
        if self._nick == msgfrom.resource:
            return
    elif msgtype not in ('normal', 'chat'):
        # Ignore non-chat messages
        return
    # Callback
    self.__callback(msg)
python
{ "resource": "" }
q44559
freeze
train
def freeze(ctx, version: str, clean: bool):
    """
    Freeze current package into a single file

    :param ctx: click context used to invoke sibling commands
    :param version: version string to stamp into the frozen build
    :param clean: when True, remove the old spec file and recompile the
        Qt resources before freezing
    """
    if clean:
        _clean_spec()
        ctx.invoke(epab.cmd.compile_qt_resources)
    _freeze(version)
python
{ "resource": "" }
q44560
_ztanh
train
def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray: """ typically call via setupz instead """ x0 = np.linspace(0, 3.14, Np) # arbitrarily picking 3.14 as where tanh gets to 99% of asymptote return np.tanh(x0)*gridmax+gridmin
python
{ "resource": "" }
q44561
SiteNotifications.get
train
def get(self, name):
    """Returns a Notification by name.

    :raises RegistryNotLoaded: if the registry has not been populated yet
    :raises NotificationNotRegistered: if *name* is unknown
    """
    if not self.loaded:
        raise RegistryNotLoaded(self)
    notification_cls = self._registry.get(name)
    if not notification_cls:
        raise NotificationNotRegistered(
            f"Notification not registered. Got '{name}'."
        )
    return notification_cls
python
{ "resource": "" }
q44562
SiteNotifications.register
train
def register(self, notification_cls=None):
    """Registers a Notification class unique by name.

    Also indexes the class under each model it declares (``models`` list,
    or the single ``model`` attribute as a fallback).

    :raises AlreadyRegistered: when the name or display name is taken
    """
    self.loaded = True
    display_names = [n.display_name for n in self.registry.values()]
    already_known = (
        notification_cls.name in self.registry
        or notification_cls.display_name in display_names
    )
    if already_known:
        raise AlreadyRegistered(
            f"Notification {notification_cls.name}: "
            f"{notification_cls.display_name} is already registered."
        )
    self.registry.update({notification_cls.name: notification_cls})
    models = getattr(notification_cls, "models", [])
    if not models and getattr(notification_cls, "model", None):
        models = [getattr(notification_cls, "model")]
    for model in models:
        try:
            registered_names = [n.name for n in self.models[model]]
        except KeyError:
            # First notification for this model.
            self.models.update({model: [notification_cls]})
        else:
            if notification_cls.name not in registered_names:
                self.models[model].append(notification_cls)
python
{ "resource": "" }
q44563
SiteNotifications.notify
train
def notify(self, instance=None, **kwargs):
    """A wrapper to call notification.notify for each notification class
    associated with the given model instance.

    Returns a dictionary of {notification.name: model, ...} including only
    notifications actually sent.
    """
    notified = {}
    for notification_cls in self.registry.values():
        if notification_cls().notify(instance=instance, **kwargs):
            notified[notification_cls.name] = instance._meta.label_lower
    return notified
python
{ "resource": "" }
q44564
SiteNotifications.update_notification_list
train
def update_notification_list(self, apps=None, schema_editor=None, verbose=False):
    """Updates the notification model to ensure all registered
    notifications classes are listed.

    Typically called from a post_migrate signal; also invoked from tests
    when a notification is registered after migrations ran.
    """
    Notification = (apps or django_apps).get_model("edc_notification.notification")
    # flag all notifications as disabled and re-enable as required
    Notification.objects.all().update(enabled=False)
    if not site_notifications.loaded:
        return
    if verbose:
        sys.stdout.write(
            style.MIGRATE_HEADING("Populating Notification model:\n")
        )
    self.delete_unregistered_notifications(apps=apps)
    for name, notification_cls in site_notifications.registry.items():
        if verbose:
            sys.stdout.write(
                f" * Adding '{name}': '{notification_cls().display_name}'\n"
            )
        try:
            obj = Notification.objects.get(name=name)
        except ObjectDoesNotExist:
            Notification.objects.create(
                name=name,
                display_name=notification_cls().display_name,
                enabled=True,
            )
        else:
            # Refresh display name and re-enable the existing row.
            obj.display_name = notification_cls().display_name
            obj.enabled = True
            obj.save()
python
{ "resource": "" }
q44565
SiteNotifications.delete_unregistered_notifications
train
def delete_unregistered_notifications(self, apps=None):
    """Delete orphaned notification model instances.

    Removes every Notification row whose name is no longer present in the
    site registry; returns the queryset ``delete()`` result.
    """
    Notification = (apps or django_apps).get_model("edc_notification.notification")
    registered_names = [n.name for n in site_notifications.registry.values()]
    return Notification.objects.exclude(name__in=registered_names).delete()
python
{ "resource": "" }
q44566
SiteNotifications.create_mailing_lists
train
def create_mailing_lists(self, verbose=True):
    """Creates the mailing list for each registered notification.

    Skipped entirely unless email is enabled, the registry is loaded and a
    real (non-locmem) email backend is configured.  Returns a dict mapping
    notification name to the mailing-list API response.
    """
    responses = {}
    email_active = (
        settings.EMAIL_ENABLED
        and self.loaded
        and settings.EMAIL_BACKEND != "django.core.mail.backends.locmem.EmailBackend"
    )
    if not email_active:
        return responses
    sys.stdout.write(style.MIGRATE_HEADING(f"Creating mailing lists:\n"))
    for name, notification_cls in self.registry.items():
        message = None
        notification = notification_cls()
        manager = MailingListManager(
            address=notification.email_to,
            name=notification.name,
            display_name=notification.display_name,
        )
        try:
            response = manager.create()
        except ConnectionError as e:
            sys.stdout.write(
                style.ERROR(
                    f" * Failed to create mailing list {name}. "
                    f"Got {e}\n"
                )
            )
        else:
            if verbose:
                # Responses are usually JSON but can be plain text on error.
                try:
                    message = response.json().get("message")
                except JSONDecodeError:
                    message = response.text
                sys.stdout.write(
                    f" * Creating mailing list {name}. "
                    f'Got {response.status_code}: "{message}"\n'
                )
            responses.update({name: response})
    return responses
python
{ "resource": "" }
q44567
SiteNotifications.autodiscover
train
def autodiscover(self, module_name=None, verbose=False):
    """Autodiscovers classes in the notifications.py file of any
    INSTALLED_APP.

    Restores the registry to its previous state when an app's notifications
    module exists but fails to import.
    """
    module_name = module_name or "notifications"
    verbose = True if verbose is None else verbose
    sys.stdout.write(f" * checking for {module_name} ...\n")
    for app in django_apps.app_configs:
        try:
            mod = import_module(app)
            try:
                before_import_registry = copy.copy(site_notifications._registry)
                import_module(f"{app}.{module_name}")
                if verbose:
                    sys.stdout.write(
                        f" * registered notifications from application '{app}'\n"
                    )
            except Exception as e:
                # Roll back, and re-raise only when the submodule really
                # exists (i.e. the failure is a genuine import error).
                if f"No module named '{app}.{module_name}'" not in str(e):
                    site_notifications._registry = before_import_registry
                    if module_has_submodule(mod, module_name):
                        raise
        except ModuleNotFoundError:
            pass
python
{ "resource": "" }
q44568
CiprCfg.add_package
train
def add_package(self, package):
    """
    Add a package to this project

    Records the package source under its name, recursively registers any
    deploy dependencies, then persists the config.
    """
    packages = self._data.setdefault('packages', {})
    packages[package.name] = package.source
    for dep in package.deploy_packages:
        self.add_package(dep)
    self._save()
python
{ "resource": "" }
q44569
touch
train
def touch():
    """ Create a .vacationrc file if none exists. """
    rc_path = get_rc_path()
    if os.path.isfile(rc_path):
        return
    # Opening in append mode creates the file without clobbering anything.
    open(rc_path, 'a').close()
    print('Created file: {}'.format(rc_path))
python
{ "resource": "" }
q44570
write
train
def write(entries):
    """ Write an entire rc file. """
    try:
        with open(get_rc_path(), 'w') as rc_file:
            rc_file.writelines(entries)
    except IOError:
        print('Error writing your ~/.vacationrc file!')
python
{ "resource": "" }
q44571
append
train
def append(entry):
    """ Append either a list of strings or a string to our file. """
    if not entry:
        return
    try:
        with open(get_rc_path(), 'a') as rc_file:
            if isinstance(entry, list):
                # Lists are assumed to already carry their own newlines.
                rc_file.writelines(entry)
            else:
                rc_file.write(entry + '\n')
    except IOError:
        print('Error writing your ~/.vacationrc file!')
python
{ "resource": "" }
q44572
delete
train
def delete(bad_entry):
    """ Removes an entry from rc file. """
    kept = [line for line in read() if line.rstrip() != bad_entry]
    write(kept)
python
{ "resource": "" }
q44573
sort_func
train
def sort_func(variant=VARIANT1, case_sensitive=False):
    """A function generator that can be used for sorting.

    All keywords are passed through to `normalize()`; the returned callable
    is suitable as the ``key=`` argument to `sorted()`::

        >>> key = sort_func()
        >>> print(sorted(["fur", "far"], key=key))
        [u'far', u'fur']

    Please note, that `sort_func` returns a function.
    """
    def key(value):
        return normalize(value, variant=variant, case_sensitive=case_sensitive)
    return key
python
{ "resource": "" }
q44574
fetcher
train
def fetcher(date=None, url_pattern=URL_PATTERN):
    """ Fetch json data from n.pl

    Args:
        date (date) - defaults to today, evaluated at call time
        url_pattern (string) - default URL_PATTERN

    Returns:
        dict - data from api
    """
    # BUG FIX: the original default ``date=datetime.today()`` was evaluated
    # once at import time, so long-running processes kept fetching a stale
    # day.  Defaulting to None and resolving here keeps "today" current.
    if date is None:
        date = datetime.today()
    api_url = url_pattern % date.strftime('%Y-%m-%d')
    headers = {'Referer': 'http://n.pl/program-tv'}
    raw_result = requests.get(api_url, headers=headers).json()
    return raw_result
python
{ "resource": "" }
q44575
result_to_dict
train
def result_to_dict(raw_result):
    """ Parse raw result from fetcher into readable dictionary

    Args:
        raw_result (list) - raw data from `fetcher`: a list of
            ``[channel_id, channel_name, movies]`` entries, where each movie
            is ``[_, title, start_timestamp, duration_seconds, ...]``

    Returns:
        dict - mapping of channel id to a readable channel dictionary
    """
    result = {}
    # (The original used enumerate() but never used the index.)
    for channel in raw_result:
        channel_id, channel_name = channel[0], channel[1]
        movies = []
        for movie in channel[2]:
            start_ts, duration = movie[2], movie[3]
            movies.append({
                'title': movie[1],
                'start_time': datetime.fromtimestamp(start_ts),
                'end_time': datetime.fromtimestamp(start_ts + duration),
                # NOTE(review): a non-zero duration marks the entry as
                # "inf" — confirm intended meaning of this flag upstream.
                'inf': bool(duration),
            })
        result[channel_id] = {
            'id': channel_id,
            'name': channel_name,
            'movies': movies,
        }
    return result
python
{ "resource": "" }
q44576
TaskManager.waitForCompletion
train
def waitForCompletion(self):
    """Wait for all threads to complete their work

    Puts one (None, None) poison-pill task per worker thread on the task
    queue; each worker dies as soon as it receives one.  Then joins every
    thread in the pool.
    """
    poison_pill = (None, None)
    for _ in range(self.numberOfThreads):
        self.taskQueue.put(poison_pill)
    for worker in self.threadList:
        # print "attempting to join %s" % worker.getName()
        worker.join()
python
{ "resource": "" }
q44577
SIG.token
train
def token(cls: Type[SIGType], pubkey: str) -> SIGType:
    """
    Return SIG instance from pubkey

    :param pubkey: Public key of the signature issuer
    :return:
    """
    instance = cls()
    instance.pubkey = pubkey
    return instance
python
{ "resource": "" }
q44578
CSV.token
train
def token(cls: Type[CSVType], time: int) -> CSVType:
    """
    Return CSV instance from time

    :param time: Timestamp
    :return:
    """
    instance = cls()
    instance.time = str(time)
    return instance
python
{ "resource": "" }
q44579
CLTV.token
train
def token(cls: Type[CLTVType], timestamp: int) -> CLTVType:
    """
    Return CLTV instance from timestamp

    :param timestamp: Timestamp
    :return:
    """
    instance = cls()
    instance.timestamp = str(timestamp)
    return instance
python
{ "resource": "" }
q44580
XHX.token
train
def token(cls: Type[XHXType], sha_hash: str) -> XHXType:
    """
    Return XHX instance from sha_hash

    :param sha_hash: SHA256 hash
    :return:
    """
    instance = cls()
    instance.sha_hash = sha_hash
    return instance
python
{ "resource": "" }
q44581
Operator.token
train
def token(cls: Type[OperatorType], keyword: str) -> OperatorType:
    """
    Return Operator instance from keyword

    :param keyword: Operator keyword in expression
    :return:
    """
    return cls(keyword)
python
{ "resource": "" }
q44582
Condition.token
train
def token(cls: Type[ConditionType], left: Any, op: Optional[Any] = None,
          right: Optional[Any] = None) -> ConditionType:
    """
    Return Condition instance from arguments and Operator

    :param left: Left argument
    :param op: Operator
    :param right: Right argument
    :return:
    """
    cond = cls()
    cond.left = left
    # op and right are optional; only attach them when provided (truthy).
    if op:
        cond.op = op
    if right:
        cond.right = right
    return cond
python
{ "resource": "" }
q44583
Condition.compose
train
def compose(self, parser: Any, grammar: Any = None, attr_of: str = None) -> str:
    """
    Return the Condition as string format

    Nested sub-conditions are wrapped in parentheses; when an operator is
    present the result is "left op right", otherwise just the left side.

    :param parser: Parser instance
    :param grammar: Grammar
    :param attr_of: Attribute of...
    """
    def render(node):
        # Parenthesize nested conditions, compose leaves directly.
        text = parser.compose(node, grammar=grammar, attr_of=attr_of)
        if type(node) is Condition:
            return "({0})".format(text)
        return text

    left = render(self.left)
    if not getattr(self, 'op', None):
        return left
    right = render(self.right)
    op = parser.compose(self.op, grammar=grammar, attr_of=attr_of)
    return "{0} {1} {2}".format(left, op, right)
python
{ "resource": "" }
q44584
Parser.load_library
train
def load_library(self, path):
    '''Load a template library into the state.

    Imports *path* and merges its ``register.tags`` and ``register.helpers``
    into this parser.'''
    module = import_module(path)
    register = module.register
    self.tags.update(register.tags)
    self.helpers.update(register.helpers)
python
{ "resource": "" }
q44585
Connection.get_default_logger
train
def get_default_logger():
    """Returns default driver logger.

    DEBUG-level records go to a StreamHandler; the logger name is suffixed
    with the instance count when more than one Connection exists so that
    log lines stay distinguishable.

    :return: logger instance
    :rtype: logging.Logger
    """
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        "[%(levelname)1.1s %(asctime)s %(name)s] %(message)s",
        "%y%m%d %H:%M:%S"))
    name = "pydbal"
    if Connection._instance_count > 1:
        name += ":" + str(Connection._instance_count)
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger
python
{ "resource": "" }
q44586
Connection.ensure_connected
train
def ensure_connected(self):
    """Ensures database connection is still open.

    Reconnects automatically when auto-connect is enabled; otherwise a
    closed connection raises.
    """
    if self.is_connected():
        return
    if not self._auto_connect:
        raise DBALConnectionError.connection_closed()
    self.connect()
python
{ "resource": "" }
q44587
Connection.query
train
def query(self, sql, *args, **kwargs):
    """Executes an SQL SELECT query, returning a result set as a Statement
    object.

    :param sql: query to execute
    :param args: parameters iterable
    :param kwargs: parameters iterable
    :return: result set as a Statement object
    :rtype: pydbal.statement.Statement
    """
    self.ensure_connected()
    statement = Statement(self)
    statement.execute(sql, *args, **kwargs)
    return statement
python
{ "resource": "" }
q44588
Connection.begin_transaction
train
def begin_transaction(self):
    """Starts a transaction by suspending auto-commit mode.

    The first nesting level opens a real driver transaction; deeper levels
    create a savepoint when savepoint-nesting is enabled.
    """
    self.ensure_connected()
    self._transaction_nesting_level += 1
    if self._transaction_nesting_level == 1:
        self._driver.begin_transaction()
    elif self._nest_transactions_with_savepoints:
        self.create_savepoint(self._get_nested_transaction_savepoint_name())
python
{ "resource": "" }
q44589
Connection.commit
train
def commit(self):
    """Commits the current transaction.

    The outermost level commits on the driver; nested levels release their
    savepoint when savepoint-nesting is enabled.  In non-auto-commit mode
    a fresh implicit transaction is opened after the final commit.

    :raises DBALConnectionError: when no transaction is active or the
        transaction was marked rollback-only
    """
    if self._transaction_nesting_level == 0:
        raise DBALConnectionError.no_active_transaction()
    if self._is_rollback_only:
        raise DBALConnectionError.commit_failed_rollback_only()
    self.ensure_connected()
    if self._transaction_nesting_level == 1:
        self._driver.commit()
    elif self._nest_transactions_with_savepoints:
        self.release_savepoint(self._get_nested_transaction_savepoint_name())
    self._transaction_nesting_level -= 1
    if not self._auto_commit and self._transaction_nesting_level == 0:
        self.begin_transaction()
python
{ "resource": "" }
q44590
Connection.commit_all
train
def commit_all(self):
    """Commits all current nesting transactions.

    In non-auto-commit mode the outermost commit re-opens the implicit
    transaction, so the loop returns right after committing level 1.
    """
    while self._transaction_nesting_level != 0:
        last_implicit = (not self._auto_commit
                         and self._transaction_nesting_level == 1)
        if last_implicit:
            return self.commit()
        self.commit()
python
{ "resource": "" }
q44591
Connection.rollback
train
def rollback(self):
    """Cancels any database changes done during the current transaction.

    At the outermost level the driver rollback runs (and a fresh implicit
    transaction is opened in non-auto-commit mode); nested levels either
    roll back to their savepoint or simply flag the whole transaction as
    rollback-only.

    :raises DBALConnectionError: when no transaction is active
    """
    if self._transaction_nesting_level == 0:
        raise DBALConnectionError.no_active_transaction()
    self.ensure_connected()
    if self._transaction_nesting_level == 1:
        # Reset the level before the driver call so state is consistent
        # even if the driver raises.
        self._transaction_nesting_level = 0
        self._driver.rollback()
        self._is_rollback_only = False
        if not self._auto_commit:
            self.begin_transaction()
    elif self._nest_transactions_with_savepoints:
        self.rollback_savepoint(self._get_nested_transaction_savepoint_name())
        self._transaction_nesting_level -= 1
    else:
        self._is_rollback_only = True
        self._transaction_nesting_level -= 1
python
{ "resource": "" }
q44592
Connection.transaction
train
def transaction(self, callback):
    """Execute a function inside a transaction.

    The function is passed this Connection instance as an (optional)
    parameter. If an exception occurs while executing the function or
    committing, the transaction is rolled back and the exception
    re-raised.

    :param callback: the function to execute in a transaction
    :return: the value returned by ``callback``
    :raise: Exception
    """
    self.begin_transaction()
    try:
        result = callback(self)
        # commit stays inside the try so a failed commit also rolls back.
        self.commit()
        return result
    except BaseException:
        # Explicit BaseException instead of a bare `except:` (same
        # semantics — even KeyboardInterrupt triggers the rollback —
        # but PEP 8 compliant and intention-revealing).
        self.rollback()
        raise
python
{ "resource": "" }
q44593
Connection.set_auto_commit
train
def set_auto_commit(self, auto_commit):
    """Enable or disable auto-commit mode on this connection.

    In auto-commit mode every SQL statement is executed and committed as
    its own transaction; otherwise statements are grouped into
    transactions ended by :meth:`commit` or :meth:`rollback`. New
    connections default to auto-commit.

    NOTE: Flipping the mode while a transaction is open commits that
    transaction. Calling this with the current mode is a no-op.

    :param auto_commit: ``True`` to enable auto-commit; ``False`` to disable
    """
    wanted = bool(auto_commit)
    if wanted == self._auto_commit:
        # Mode unchanged — nothing to do.
        return
    self._auto_commit = wanted
    # Changing the mode mid-transaction flushes all open transactions.
    if self.is_connected() and self._transaction_nesting_level != 0:
        self.commit_all()
python
{ "resource": "" }
q44594
Connection.set_transaction_isolation
train
def set_transaction_isolation(self, level):
    """Activate the given transaction isolation level.

    The level is cached locally and then applied via the platform.

    :param level: the isolation level to set
    """
    self.ensure_connected()
    # Cache first, then apply — matches the original ordering.
    self._transaction_isolation_level = level
    self._platform.set_transaction_isolation(level)
python
{ "resource": "" }
q44595
Connection.get_transaction_isolation
train
def get_transaction_isolation(self):
    """Return the currently active transaction isolation level.

    Lazily falls back to (and caches) the platform default when no level
    has been set yet.

    :return: the current transaction isolation level
    :rtype: int
    """
    level = self._transaction_isolation_level
    if level is None:
        level = self._platform.get_default_transaction_isolation_level()
        self._transaction_isolation_level = level
    return level
python
{ "resource": "" }
q44596
Connection.set_nest_transactions_with_savepoints
train
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints):
    """Choose whether nested transactions should use savepoints.

    :param nest_transactions_with_savepoints: ``True`` or ``False``
    :raise pydbal.exception.DBALConnectionError: if called inside an open
        transaction, or if the platform lacks savepoint support
    """
    enable = bool(nest_transactions_with_savepoints)
    if self._transaction_nesting_level > 0:
        # The nesting strategy cannot be switched mid-transaction.
        raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction()
    if not self._platform.is_savepoints_supported():
        raise DBALConnectionError.savepoints_not_supported()
    self._nest_transactions_with_savepoints = enable
python
{ "resource": "" }
q44597
Connection.create_savepoint
train
def create_savepoint(self, savepoint):
    """Create a new savepoint with the given name.

    :param savepoint: the name of the savepoint to create
    :raise: pydbal.exception.DBALConnectionError
    """
    if self._platform.is_savepoints_supported():
        self.ensure_connected()
        self._platform.create_savepoint(savepoint)
    else:
        raise DBALConnectionError.savepoints_not_supported()
python
{ "resource": "" }
q44598
Connection.release_savepoint
train
def release_savepoint(self, savepoint):
    """Release the given savepoint.

    :param savepoint: the name of the savepoint to release
    :raise: pydbal.exception.DBALConnectionError
    """
    if not self._platform.is_savepoints_supported():
        raise DBALConnectionError.savepoints_not_supported()
    if not self._platform.is_release_savepoints_supported():
        # Platform releases savepoints implicitly — nothing to do.
        return
    self.ensure_connected()
    self._platform.release_savepoint(savepoint)
python
{ "resource": "" }
q44599
Connection.rollback_savepoint
train
def rollback_savepoint(self, savepoint):
    """Roll back to the given savepoint.

    :param savepoint: the name of the savepoint to roll back to
    :raise: pydbal.exception.DBALConnectionError
    """
    if self._platform.is_savepoints_supported():
        self.ensure_connected()
        self._platform.rollback_savepoint(savepoint)
    else:
        raise DBALConnectionError.savepoints_not_supported()
python
{ "resource": "" }